PKGCONF ?= pkg-config
-DPDK_CFLAGS= -Wall -Wno-deprecated-declarations -Werror $(shell $(PKGCONF) --cflags libdpdk)
+DPDK_CFLAGS= -Wall -Wextra -Wno-deprecated-declarations -Werror $(shell $(PKGCONF) --cflags libdpdk)
KERNPREINCLUDES:= ${INCLUDES}
INCLUDES= -I${OVERRIDE_INCLUDES_ROOT} ${KERNPREINCLUDES}
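Adding -Wextra on top of -Wall turns on, among others, -Wsign-compare and -Wunused-parameter; combined with the existing -Werror, those warnings become build failures, which is what the remaining hunks fix. A minimal sketch (hypothetical code, not from this tree) of both warnings:

/* Compiles clean with -Wall alone, fails with -Wall -Wextra -Werror:
 * 'n < limit' mixes unsigned and signed (-Wsign-compare), and
 * 'tag' is never used (-Wunused-parameter). */
static int
count_upto(int limit, const char *tag)
{
    unsigned n = 0;
    while (n < limit)
        n++;
    return (int)n;
}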
parse_lcore_mask(struct ff_config *cfg, const char *coremask)
{
int i, j, idx = 0, shift = 0, zero_num = 0;
- unsigned count = 0;
+ int count = 0;
char c;
int val;
uint16_t *proc_lcore;
snprintf(buf, sizeof(buf) - 1, "%llx%s",
(unsigned long long)1<<shift, zero);
cfg->dpdk.proc_mask = strdup(buf);
- }
+ }
count++;
}
}
}
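The count change is the other common -Wextra fix: the counter was unsigned but is presumably compared against a signed configuration field (such as nb_procs), which -Wsign-compare rejects under -Werror. A sketch of the shape, with the comparison target assumed:

/* With 'unsigned count' this loop condition warned; declaring the
 * counter signed keeps the comparison within one signedness. */
static int
check_count(int nb_procs)   /* assumed signed config field */
{
    int count = 0;          /* was: unsigned count */
    while (count < nb_procs)
        count++;
    return count;
}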
static int
-rss_tbl_cfg_handler(struct ff_config *cfg, struct ff_rss_check_cfg *cur)
+rss_tbl_cfg_handler(struct ff_rss_check_cfg *cur)
{
    /* virtual IP (vip) config */
int ret, nb_rss_tbl, i, j, k;
}
static int
-rss_check_cfg_handler(struct ff_config *cfg, const char *section,
+rss_check_cfg_handler(struct ff_config *cfg, __rte_unused const char *section,
const char *name, const char *value)
{
if (cfg->dpdk.port_cfgs == NULL && cfg->dpdk.vlan_cfgs == NULL) {
} else if (strcmp(name, "rss_tbl") == 0) {
cur->rss_tbl_str = strdup(value);
if (cur->rss_tbl_str) {
- return rss_tbl_cfg_handler(cfg, cur);
+ return rss_tbl_cfg_handler(cur);
}
}
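__rte_unused is DPDK's portable spelling of the GCC/Clang unused attribute (rte_common.h defines it, roughly, as __attribute__((__unused__))); annotating the dead section parameter silences -Wunused-parameter without changing the handler's signature, which is presumably fixed by the config parser's callback contract. The same annotation recurs in the KNI hunks below. A self-contained sketch:

/* Without the attribute, -Wextra -Werror rejects the unused
 * 'section' parameter even though the callback signature is fixed. */
#define my_unused __attribute__((__unused__))   /* stand-in for __rte_unused */

static int
handler(my_unused const char *section, const char *name, const char *value)
{
    return name[0] != '\0' && value[0] != '\0';
}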
struct ff_rss_check_cfg {
int enable;
- uint32_t nb_rss_tbl;
+ int nb_rss_tbl;
char *rss_tbl_str;
struct ff_rss_tbl_cfg rss_tbl_cfgs[FF_RSS_TBL_MAX_ENTRIES];
};
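Switching nb_rss_tbl to int matches the signed locals already declared in rss_tbl_cfg_handler (int ret, nb_rss_tbl, i, j, k above), so loops over rss_tbl_cfgs compare signed against signed. A sketch of the assumed access pattern:

/* Hypothetical walk; the real per-entry work differs. */
static int
count_rss_tbl(const struct ff_rss_check_cfg *cur)
{
    int i, n = 0;           /* signed index matches the signed field */
    for (i = 0; i < cur->nb_rss_tbl; i++)
        n++;
    return n;
}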
return;
}
- int reta_conf_size = RTE_MAX(1, reta_size / RTE_ETH_RETA_GROUP_SIZE);
+ unsigned reta_conf_size = RTE_MAX(1, reta_size / RTE_ETH_RETA_GROUP_SIZE);
struct rte_eth_rss_reta_entry64 reta_conf[reta_conf_size];
/* config HW indirection table */
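Making reta_conf_size unsigned keeps the indirection-table index math in one signedness domain; the size feeds a VLA and, presumably, unsigned loop indices afterwards. A sketch of the follow-on shape (assumed loop, not the real body):

#include <string.h>
#include <rte_ethdev.h>

static void
clear_reta(struct rte_eth_rss_reta_entry64 *reta_conf, unsigned reta_conf_size)
{
    unsigned i;             /* with a signed size, this trips -Wsign-compare */
    for (i = 0; i < reta_conf_size; i++)
        memset(&reta_conf[i], 0, sizeof(reta_conf[i]));
}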
ff_get_current_time(&sec, &nsec);
if (sec > last_sec) {
- if (kni_rate_limt.gerneal_packets > ff_global_cfg.kni.general_packets_ratelimit ||
- kni_rate_limt.console_packets > ff_global_cfg.kni.console_packets_ratelimit ||
- kni_rate_limt.kernel_packets > ff_global_cfg.kni.kernel_packets_ratelimit) {
+ if (kni_rate_limt.gerneal_packets > (uint64_t)ff_global_cfg.kni.general_packets_ratelimit ||
+ kni_rate_limt.console_packets > (uint64_t)ff_global_cfg.kni.console_packets_ratelimit ||
+ kni_rate_limt.kernel_packets > (uint64_t)ff_global_cfg.kni.kernel_packets_ratelimit) {
printf("kni ratelimit, general:%lu/%d, console:%lu/%d, kernel:%lu/%d, last sec:%ld, sec:%ld\n",
kni_rate_limt.gerneal_packets, ff_global_cfg.kni.general_packets_ratelimit,
kni_rate_limt.console_packets, ff_global_cfg.kni.console_packets_ratelimit,
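These casts, and the matching ones in the KNI transmit and enqueue paths below, resolve -Wsign-compare between the uint64_t packet counters in kni_rate_limt and the signed ratelimit fields in ff_global_cfg.kni (types assumed from the printf formats above: %lu counters vs. %d limits). The cast is safe as long as the configured limits are non-negative; a negative limit would wrap to a huge unsigned value and effectively disable the check. The pattern in isolation:

/* Sketch: explicit widening cast keeps the comparison unsigned. */
static int
over_limit(uint64_t packets, int limit)    /* assumed field types */
{
    return packets > (uint64_t)limit;
}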
#ifdef FF_KNI_KNI
/* Currently we don't support changing the MTU. */
static int
-kni_change_mtu(uint16_t port_id, unsigned new_mtu)
+kni_change_mtu(__rte_unused uint16_t port_id, __rte_unused unsigned new_mtu)
{
return 0;
}
#endif
static int
-kni_process_tx(uint16_t port_id, uint16_t queue_id,
+kni_process_tx(uint16_t port_id, __rte_unused uint16_t queue_id,
struct rte_mbuf **pkts_burst, unsigned count)
{
    /* read packets from the kni ring (phy port) and transmit them to kni */
 * If there are too many processes, control packets may also be rate-limited.
*/
if (ff_global_cfg.kni.kernel_packets_ratelimit) {
- if (likely(kni_rate_limt.kernel_packets < ff_global_cfg.kni.kernel_packets_ratelimit)) {
+ if (likely(kni_rate_limt.kernel_packets < (uint64_t)ff_global_cfg.kni.kernel_packets_ratelimit)) {
nb_to_tx = nb_tx;
} else {
nb_to_tx = 0;
}
void
-ff_kni_alloc(uint16_t port_id, unsigned socket_id, int type, int port_idx,
- struct rte_mempool *mbuf_pool, unsigned ring_queue_size)
+ff_kni_alloc(uint16_t port_id, unsigned socket_id, __rte_unused int type, int port_idx,
+ __rte_unused struct rte_mempool *mbuf_pool, unsigned ring_queue_size)
{
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
struct rte_ether_addr addr = {{0}};
if (filter >= FILTER_ARP) {
if (ff_global_cfg.kni.console_packets_ratelimit) {
kni_rate_limt.console_packets++;
- if (kni_rate_limt.console_packets > ff_global_cfg.kni.console_packets_ratelimit) {
+ if (kni_rate_limt.console_packets > (uint64_t)ff_global_cfg.kni.console_packets_ratelimit) {
goto error;
}
}
} else {
if (ff_global_cfg.kni.general_packets_ratelimit) {
kni_rate_limt.gerneal_packets++;
- if (kni_rate_limt.gerneal_packets > ff_global_cfg.kni.general_packets_ratelimit) {
+ if (kni_rate_limt.gerneal_packets > (uint64_t)ff_global_cfg.kni.general_packets_ratelimit) {
goto error;
}
}
return ff_kevent(epfd, kev, changes, NULL, 0, NULL);
}
-static void
+static void
ff_event_to_epoll(void **ev, struct kevent *kev)
{
unsigned int event_one = 0;
(*ppev)++;
}
-int
+int
ff_epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout)
{
int i, ret;
+ (void)timeout;
if (!events || maxevents < 1) {
errno = EINVAL;
return -1;
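The epoll shim silences the unused timeout with the plain C cast-to-void idiom instead of __rte_unused, presumably to keep this file free of DPDK-specific macros; both forms defeat -Wunused-parameter. Side by side (illustrative only):

/* Two equivalent ways to mark a parameter deliberately unused. */
static int wait_a(int fd, int timeout) { (void)timeout; return fd; }
static int wait_b(int fd, __attribute__((__unused__)) int timeout) { return fd; }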
void *ret = (mmap(addr, len, host_prot, host_flags, fd, offset));
- if ((uint64_t)ret == -1) {
+ if (ret == MAP_FAILED) {
printf("fst mmap failed:%s\n", strerror(errno));
exit(1);
}
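Unlike the warning fixes above, this is a behavior fix: mmap(2) reports failure by returning MAP_FAILED, i.e. (void *)-1. Comparing through a uint64_t cast happens to work on LP64, but on a 32-bit build (uint64_t)ret can be at most 0xFFFFFFFF and never equals (uint64_t)-1, so the old test could not detect errors there. The portable check, as a self-contained sketch:

#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

static void *
xmap(size_t len)
{
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {  /* never compare the pointer through an integer cast */
        fprintf(stderr, "mmap: %s\n", strerror(errno));
        exit(1);
    }
    return p;
}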
case ff_EPIPE: errno = EPIPE; break;
case ff_EDOM: errno = EDOM; break;
case ff_ERANGE: errno = ERANGE; break;
-
+
/* case ff_EAGAIN: same as EWOULDBLOCK */
case ff_EWOULDBLOCK: errno = EWOULDBLOCK; break;
-
+
case ff_EINPROGRESS: errno = EINPROGRESS; break;
case ff_EALREADY: errno = EALREADY; break;
case ff_ENOTSOCK: errno = ENOTSOCK; break;