Skip to content

Commit 0e935ae

Browse files
BernardMetzler authored and jgunthorpe committed
rdma/siw: iWarp wire packet format
Broken up commit to add the Soft iWarp RDMA driver. Signed-off-by: Bernard Metzler <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]>
1 parent 09fbca8 commit 0e935ae

File tree

1 file changed

+380
-0
lines changed

1 file changed

+380
-0
lines changed

drivers/infiniband/sw/siw/iwarp.h

Lines changed: 380 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,380 @@
1+
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
2+
3+
/* Authors: Bernard Metzler <[email protected]> */
4+
/* Copyright (c) 2008-2019, IBM Corporation */
5+
6+
#ifndef _IWARP_H
7+
#define _IWARP_H
8+
9+
#include <rdma/rdma_user_cm.h> /* RDMA_MAX_PRIVATE_DATA */
10+
#include <linux/types.h>
11+
#include <asm/byteorder.h>
12+
13+
/* Wire protocol version numbers and MPA connection-setup constants */
#define RDMAP_VERSION 1
#define DDP_VERSION 1
#define MPA_REVISION_1 1
#define MPA_REVISION_2 2
/* Maximum private data carried in an MPA request/reply frame */
#define MPA_MAX_PRIVDATA RDMA_MAX_PRIVATE_DATA
/* 16-byte keys identifying MPA request and reply frames on the wire */
#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"
/* Only the low 14 bits of the MPA v2 IRD/ORD words carry the value */
#define MPA_IRD_ORD_MASK 0x3fff
21+
22+
/*
 * Trailing parameters of the MPA request/reply frame:
 * flag/revision bits and the length of the private data that follows.
 */
struct mpa_rr_params {
	__be16 bits;	/* flags + revision, see MPA_RR_* below */
	__be16 pd_len;	/* length of private data following the header */
};
26+
27+
/*
 * MPA request/response header bits & fields
 * (values are big-endian as they appear on the wire)
 */
enum {
	MPA_RR_FLAG_MARKERS = cpu_to_be16(0x8000),	/* MPA markers requested */
	MPA_RR_FLAG_CRC = cpu_to_be16(0x4000),		/* CRC in use */
	MPA_RR_FLAG_REJECT = cpu_to_be16(0x2000),	/* connection rejected */
	MPA_RR_FLAG_ENHANCED = cpu_to_be16(0x1000),	/* enhanced (v2) setup */
	MPA_RR_FLAG_GSO_EXP = cpu_to_be16(0x0800),
	MPA_RR_MASK_REVISION = cpu_to_be16(0x00ff)	/* MPA revision number */
};
38+
39+
/*
 * MPA request/reply header
 */
struct mpa_rr {
	__u8 key[16];	/* MPA_KEY_REQ or MPA_KEY_REP */
	struct mpa_rr_params params;
};
46+
47+
/*
 * Store the MPA revision number in the low byte of the RR "bits"
 * word, leaving all flag bits untouched.
 */
static inline void __mpa_rr_set_revision(__be16 *bits, u8 rev)
{
	__be16 flag_bits = *bits & ~MPA_RR_MASK_REVISION;
	__be16 rev_bits = cpu_to_be16(rev) & MPA_RR_MASK_REVISION;

	*bits = flag_bits | rev_bits;
}
52+
53+
/* Extract the MPA revision number from the RR "bits" word. */
static inline u8 __mpa_rr_revision(__be16 mpa_rr_bits)
{
	return be16_to_cpu(mpa_rr_bits & MPA_RR_MASK_REVISION);
}
59+
60+
/*
 * Control bits carried in the MPA v2 enhanced-setup IRD/ORD words.
 * Note the RDMA_WRITE/READ_RTR bits reuse the same bit positions as
 * PEER_TO_PEER/ZERO_LENGTH_RTR above.
 */
enum mpa_v2_ctrl {
	MPA_V2_PEER_TO_PEER = cpu_to_be16(0x8000),
	MPA_V2_ZERO_LENGTH_RTR = cpu_to_be16(0x4000),
	MPA_V2_RDMA_WRITE_RTR = cpu_to_be16(0x8000),
	MPA_V2_RDMA_READ_RTR = cpu_to_be16(0x4000),
	MPA_V2_RDMA_NO_RTR = cpu_to_be16(0x0000),
	MPA_V2_MASK_IRD_ORD = cpu_to_be16(0x3fff)	/* 14-bit IRD/ORD value */
};
68+
69+
/* MPA v2 private-data prefix: IRD and ORD words (with mpa_v2_ctrl bits) */
struct mpa_v2_data {
	__be16 ird;
	__be16 ord;
};
73+
74+
/* MPA marker as placed into the FPDU stream */
struct mpa_marker {
	__be16 rsvd;
	__be16 fpdu_hmd; /* FPDU header-marker distance (= MPA's FPDUPTR) */
};
78+
79+
/*
 * maximum MPA trailer
 */
struct mpa_trailer {
	__u8 pad[4];	/* up to 4 pad bytes to align the CRC */
	__be32 crc;
};
86+
87+
/* MPA framing overhead: 2-byte length header, 4-byte CRC trailer */
#define MPA_HDR_SIZE 2
#define MPA_CRC_SIZE 4
89+
90+
/*
 * Common portion of iWARP headers (MPA, DDP, RDMAP)
 * for any FPDU
 */
struct iwarp_ctrl {
	__be16 mpa_len;		/* MPA FPDU length field */
	__be16 ddp_rdmap_ctrl;	/* packed DDP + RDMAP control bits, see below */
};
98+
99+
/*
 * DDP/RDMAP Hdr bits & fields
 * (bit layout of iwarp_ctrl.ddp_rdmap_ctrl, big-endian on the wire)
 */
enum {
	DDP_FLAG_TAGGED = cpu_to_be16(0x8000),		/* tagged DDP segment */
	DDP_FLAG_LAST = cpu_to_be16(0x4000),		/* last segment of msg */
	DDP_MASK_RESERVED = cpu_to_be16(0x3C00),
	DDP_MASK_VERSION = cpu_to_be16(0x0300),		/* DDP version, bits 8-9 */
	RDMAP_MASK_VERSION = cpu_to_be16(0x00C0),	/* RDMAP version, bits 6-7 */
	RDMAP_MASK_RESERVED = cpu_to_be16(0x0030),
	RDMAP_MASK_OPCODE = cpu_to_be16(0x000f)		/* enum rdma_opcode */
};
111+
112+
static inline u8 __ddp_get_version(struct iwarp_ctrl *ctrl)
113+
{
114+
return be16_to_cpu(ctrl->ddp_rdmap_ctrl & DDP_MASK_VERSION) >> 8;
115+
}
116+
117+
/* Store the DDP version field, preserving all other control bits. */
static inline void __ddp_set_version(struct iwarp_ctrl *ctrl, u8 version)
{
	__be16 keep = ctrl->ddp_rdmap_ctrl & ~DDP_MASK_VERSION;
	__be16 ver = cpu_to_be16((u16)version << 8) & DDP_MASK_VERSION;

	ctrl->ddp_rdmap_ctrl = keep | ver;
}
123+
124+
static inline u8 __rdmap_get_version(struct iwarp_ctrl *ctrl)
125+
{
126+
__be16 ver = ctrl->ddp_rdmap_ctrl & RDMAP_MASK_VERSION;
127+
128+
return be16_to_cpu(ver) >> 6;
129+
}
130+
131+
/* Store the RDMAP version field, preserving all other control bits. */
static inline void __rdmap_set_version(struct iwarp_ctrl *ctrl, u8 version)
{
	__be16 keep = ctrl->ddp_rdmap_ctrl & ~RDMAP_MASK_VERSION;
	__be16 ver = cpu_to_be16(version << 6) & RDMAP_MASK_VERSION;

	ctrl->ddp_rdmap_ctrl = keep | ver;
}
136+
137+
static inline u8 __rdmap_get_opcode(struct iwarp_ctrl *ctrl)
138+
{
139+
return be16_to_cpu(ctrl->ddp_rdmap_ctrl & RDMAP_MASK_OPCODE);
140+
}
141+
142+
/* Store the RDMAP opcode, preserving all other control bits. */
static inline void __rdmap_set_opcode(struct iwarp_ctrl *ctrl, u8 opcode)
{
	__be16 keep = ctrl->ddp_rdmap_ctrl & ~RDMAP_MASK_OPCODE;
	__be16 op = cpu_to_be16(opcode) & RDMAP_MASK_OPCODE;

	ctrl->ddp_rdmap_ctrl = keep | op;
}
147+
148+
/* RDMA Write header: tagged DDP segment addressing the sink buffer */
struct iwarp_rdma_write {
	struct iwarp_ctrl ctrl;
	__be32 sink_stag;	/* target buffer STag */
	__be64 sink_to;		/* target buffer tagged offset */
};
153+
154+
/* RDMA Read Request header: untagged DDP segment naming both buffers */
struct iwarp_rdma_rreq {
	struct iwarp_ctrl ctrl;
	__be32 rsvd;
	__be32 ddp_qn;		/* untagged queue number */
	__be32 ddp_msn;		/* message sequence number */
	__be32 ddp_mo;		/* message offset */
	__be32 sink_stag;	/* local buffer receiving the read data */
	__be64 sink_to;
	__be32 read_size;	/* number of bytes requested */
	__be32 source_stag;	/* remote buffer to read from */
	__be64 source_to;
};
166+
167+
/* RDMA Read Response header: tagged DDP segment targeting the sink buffer */
struct iwarp_rdma_rresp {
	struct iwarp_ctrl ctrl;
	__be32 sink_stag;
	__be64 sink_to;
};
172+
173+
/* Send header: untagged DDP segment */
struct iwarp_send {
	struct iwarp_ctrl ctrl;
	__be32 rsvd;
	__be32 ddp_qn;	/* untagged queue number */
	__be32 ddp_msn;	/* message sequence number */
	__be32 ddp_mo;	/* message offset */
};
180+
181+
/* Send with Invalidate header: as iwarp_send, but carries an STag
 * to invalidate instead of the reserved word.
 */
struct iwarp_send_inv {
	struct iwarp_ctrl ctrl;
	__be32 inval_stag;	/* STag to be invalidated at the peer */
	__be32 ddp_qn;
	__be32 ddp_msn;
	__be32 ddp_mo;
};
188+
189+
/*
 * Terminate header: untagged DDP segment followed by the terminate
 * control word. The control word is declared as bitfields in both
 * possible bit orders; use the __rdmap_term_*() accessors below rather
 * than masking by hand.
 */
struct iwarp_terminate {
	struct iwarp_ctrl ctrl;
	__be32 rsvd;
	__be32 ddp_qn;
	__be32 ddp_msn;
	__be32 ddp_mo;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__be32 layer : 4;	/* enum term_elayer */
	__be32 etype : 4;	/* per-layer error type */
	__be32 ecode : 8;	/* per-layer error code */
	__be32 flag_m : 1;	/* DDP/RDMAP header included */
	__be32 flag_d : 1;
	__be32 flag_r : 1;
	__be32 reserved : 13;
#elif defined(__BIG_ENDIAN_BITFIELD)
	__be32 reserved : 13;
	__be32 flag_r : 1;
	__be32 flag_d : 1;
	__be32 flag_m : 1;
	__be32 ecode : 8;
	__be32 etype : 4;
	__be32 layer : 4;
#else
#error "undefined byte order"
#endif
};
215+
216+
/*
 * Terminate Hdr bits & fields
 * (mask view of the terminate control word, matching the bitfields above)
 */
enum {
	TERM_MASK_LAYER = cpu_to_be32(0xf0000000),
	TERM_MASK_ETYPE = cpu_to_be32(0x0f000000),
	TERM_MASK_ECODE = cpu_to_be32(0x00ff0000),
	TERM_FLAG_M = cpu_to_be32(0x00008000),
	TERM_FLAG_D = cpu_to_be32(0x00004000),
	TERM_FLAG_R = cpu_to_be32(0x00002000),
	TERM_MASK_RESVD = cpu_to_be32(0x00001fff)
};
228+
229+
static inline u8 __rdmap_term_layer(struct iwarp_terminate *term)
230+
{
231+
return term->layer;
232+
}
233+
234+
static inline void __rdmap_term_set_layer(struct iwarp_terminate *term,
235+
u8 layer)
236+
{
237+
term->layer = layer & 0xf;
238+
}
239+
240+
static inline u8 __rdmap_term_etype(struct iwarp_terminate *term)
241+
{
242+
return term->etype;
243+
}
244+
245+
static inline void __rdmap_term_set_etype(struct iwarp_terminate *term,
246+
u8 etype)
247+
{
248+
term->etype = etype & 0xf;
249+
}
250+
251+
static inline u8 __rdmap_term_ecode(struct iwarp_terminate *term)
252+
{
253+
return term->ecode;
254+
}
255+
256+
static inline void __rdmap_term_set_ecode(struct iwarp_terminate *term,
257+
u8 ecode)
258+
{
259+
term->ecode = ecode;
260+
}
261+
262+
/*
 * Common portion of iWARP headers (MPA, DDP, RDMAP)
 * for an FPDU carrying an untagged DDP segment
 */
struct iwarp_ctrl_untagged {
	struct iwarp_ctrl ctrl;
	__be32 rsvd;
	__be32 ddp_qn;	/* untagged queue number */
	__be32 ddp_msn;	/* message sequence number */
	__be32 ddp_mo;	/* message offset */
};
273+
274+
/*
 * Common portion of iWARP headers (MPA, DDP, RDMAP)
 * for an FPDU carrying a tagged DDP segment
 */
struct iwarp_ctrl_tagged {
	struct iwarp_ctrl ctrl;
	__be32 ddp_stag;	/* target buffer STag */
	__be64 ddp_to;		/* target buffer tagged offset */
};
283+
284+
/* Union over every iWARP header variant defined above */
union iwarp_hdr {
	struct iwarp_ctrl ctrl;
	struct iwarp_ctrl_untagged c_untagged;
	struct iwarp_ctrl_tagged c_tagged;
	struct iwarp_rdma_write rwrite;
	struct iwarp_rdma_rreq rreq;
	struct iwarp_rdma_rresp rresp;
	struct iwarp_terminate terminate;
	struct iwarp_send send;
	struct iwarp_send_inv send_inv;
};
295+
296+
/* Protocol layer reporting a terminate condition (terminate hdr "layer") */
enum term_elayer {
	TERM_ERROR_LAYER_RDMAP = 0x00,
	TERM_ERROR_LAYER_DDP = 0x01,
	TERM_ERROR_LAYER_LLP = 0x02 /* eg., MPA */
};
301+
302+
/* DDP-layer error types (terminate hdr "etype" when layer == DDP) */
enum ddp_etype {
	DDP_ETYPE_CATASTROPHIC = 0x0,
	DDP_ETYPE_TAGGED_BUF = 0x1,
	DDP_ETYPE_UNTAGGED_BUF = 0x2,
	DDP_ETYPE_RSVD = 0x3
};
308+
309+
/*
 * DDP-layer error codes. Tagged (T_) and untagged (UT_) codes share
 * numeric values; the accompanying etype disambiguates them.
 */
enum ddp_ecode {
	/* unspecified, set to zero */
	DDP_ECODE_CATASTROPHIC = 0x00,
	/* Tagged Buffer Errors */
	DDP_ECODE_T_INVALID_STAG = 0x00,
	DDP_ECODE_T_BASE_BOUNDS = 0x01,
	DDP_ECODE_T_STAG_NOT_ASSOC = 0x02,
	DDP_ECODE_T_TO_WRAP = 0x03,
	DDP_ECODE_T_VERSION = 0x04,
	/* Untagged Buffer Errors */
	DDP_ECODE_UT_INVALID_QN = 0x01,
	DDP_ECODE_UT_INVALID_MSN_NOBUF = 0x02,
	DDP_ECODE_UT_INVALID_MSN_RANGE = 0x03,
	DDP_ECODE_UT_INVALID_MO = 0x04,
	DDP_ECODE_UT_MSG_TOOLONG = 0x05,
	DDP_ECODE_UT_VERSION = 0x06
};
326+
327+
/* Fixed queue numbers assigned to untagged DDP messages by RDMAP */
enum rdmap_untagged_qn {
	RDMAP_UNTAGGED_QN_SEND = 0,
	RDMAP_UNTAGGED_QN_RDMA_READ = 1,
	RDMAP_UNTAGGED_QN_TERMINATE = 2,
	RDMAP_UNTAGGED_QN_COUNT = 3
};
333+
334+
/* RDMAP-layer error types (terminate hdr "etype" when layer == RDMAP) */
enum rdmap_etype {
	RDMAP_ETYPE_CATASTROPHIC = 0x0,
	RDMAP_ETYPE_REMOTE_PROTECTION = 0x1,
	RDMAP_ETYPE_REMOTE_OPERATION = 0x2
};
339+
340+
/* RDMAP-layer error codes */
enum rdmap_ecode {
	RDMAP_ECODE_INVALID_STAG = 0x00,
	RDMAP_ECODE_BASE_BOUNDS = 0x01,
	RDMAP_ECODE_ACCESS_RIGHTS = 0x02,
	RDMAP_ECODE_STAG_NOT_ASSOC = 0x03,
	RDMAP_ECODE_TO_WRAP = 0x04,
	RDMAP_ECODE_VERSION = 0x05,
	RDMAP_ECODE_OPCODE = 0x06,
	RDMAP_ECODE_CATASTROPHIC_STREAM = 0x07,
	RDMAP_ECODE_CATASTROPHIC_GLOBAL = 0x08,
	RDMAP_ECODE_CANNOT_INVALIDATE = 0x09,
	RDMAP_ECODE_UNSPECIFIED = 0xff
};
353+
354+
/* LLP (MPA) layer error codes */
enum llp_ecode {
	LLP_ECODE_TCP_STREAM_LOST = 0x01, /* How to transfer this ?? */
	LLP_ECODE_RECEIVED_CRC = 0x02,
	LLP_ECODE_FPDU_START = 0x03,
	LLP_ECODE_INVALID_REQ_RESP = 0x04,

	/* Errors for Enhanced Connection Establishment only */
	LLP_ECODE_LOCAL_CATASTROPHIC = 0x05,
	LLP_ECODE_INSUFFICIENT_IRD = 0x06,
	LLP_ECODE_NO_MATCHING_RTR = 0x07
};
365+
366+
/* LLP-layer error type: MPA is the only defined LLP */
enum llp_etype { LLP_ETYPE_MPA = 0x00 };
367+
368+
/* RDMAP opcodes as carried in the DDP/RDMAP control word (4-bit field) */
enum rdma_opcode {
	RDMAP_RDMA_WRITE = 0x0,
	RDMAP_RDMA_READ_REQ = 0x1,
	RDMAP_RDMA_READ_RESP = 0x2,
	RDMAP_SEND = 0x3,
	RDMAP_SEND_INVAL = 0x4,
	RDMAP_SEND_SE = 0x5,		/* send with solicited event */
	RDMAP_SEND_SE_INVAL = 0x6,
	RDMAP_TERMINATE = 0x7,
	RDMAP_NOT_SUPPORTED = RDMAP_TERMINATE + 1
};
379+
380+
#endif

0 commit comments

Comments
 (0)