make -C tools/testing/selftests TARGETS=net/forwarding TEST_PROGS=bridge_mmdb_max.sh TEST_GEN_PROGS="" run_tests
make: Entering directory '/home/virtme/testing-4/tools/testing/selftests'
make[1]: Entering directory '/home/virtme/testing-4/tools/testing/selftests/net/forwarding'
make[1]: Nothing to be done for 'all'.
make[1]: Leaving directory '/home/virtme/testing-4/tools/testing/selftests/net/forwarding'
make[1]: Entering directory '/home/virtme/testing-4/tools/testing/selftests/net/forwarding'
TAP version 13
1..1
# overriding timeout to 21600
# selftests: net/forwarding: bridge_mdb_max.sh
[ 25.760828][ T233] ip (233) used greatest stack depth: 23328 bytes left
[ 30.139941][ T275] 8021q: 802.1Q VLAN Support v1.8
# INFO: 802.1d tests
[ 35.264754][ T308] br0: port 1(veth1) entered blocking state
[ 35.265628][ T308] br0: port 1(veth1) entered disabled state
[ 35.266144][ T308] veth1: entered allmulticast mode
[ 35.275836][ T308] veth1: entered promiscuous mode
[ 35.432693][ T47] br0: port 1(veth1) entered blocking state
[ 35.433086][ T47] br0: port 1(veth1) entered forwarding state
[ 35.792050][ T311] br0: port 2(veth2) entered blocking state
[ 35.792628][ T311] br0: port 2(veth2) entered disabled state
[ 35.793198][ T311] veth2: entered allmulticast mode
[ 35.796399][ T311] veth2: entered promiscuous mode
[ 35.966235][ T49] br0: port 2(veth2) entered blocking state
[ 35.966586][ T49] br0: port 2(veth2) entered forwarding state
[ 35.976612][ C3] ------------[ cut here ]------------
[ 35.976981][ C3] UBSAN: invalid-load in ./include/linux/skbuff.h:4267:9
[ 35.977360][ C3] load of value 107 is not a valid value for type '_Bool'
[ 35.977719][ C3] CPU: 3 PID: 49 Comm: kworker/3:1 Not tainted 6.8.0-rc4-virtme #1
[ 35.978120][ C3] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/2014
[ 35.978737][ C3] Workqueue: mld mld_ifc_work
[ 35.978986][ C3] Call Trace:
[ 35.979188][ C3]
[ 35.979343][ C3] dump_stack_lvl+0x92/0xb0
[ 35.979584][ C3] __ubsan_handle_load_invalid_value+0xa7/0xe0
[ 35.979908][ C3] br_forward_finish.cold+0xf/0x3c [bridge]
[ 35.980263][ C3] deliver_clone+0x52/0x90 [bridge]
[ 35.980622][ C3] br_handle_frame_finish+0xbdd/0x1ce0 [bridge]
[ 35.980987][ C3] ? __pfx_br_handle_frame_finish+0x10/0x10 [bridge]
[ 35.981362][ C3] ? kernel_text_address+0x17/0xe0
[ 35.981643][ C3] ? hlock_class+0x4e/0x130
[ 35.981947][ C3] br_handle_frame+0x612/0xe70 [bridge]
[ 35.982290][ C3] ? __pfx_br_handle_frame+0x10/0x10 [bridge]
[ 35.982663][ C3] __netif_receive_skb_core.constprop.0+0x783/0x2dc0
[ 35.983012][ C3] ? mark_lock+0x38/0x3e0
[ 35.983244][ C3] ? __lock_acquire+0xb67/0x1610
DETECTED CRASH, lowering timeout
[ 35.983503][ C3] ? __pfx___netif_receive_skb_core.constprop.0+0x10/0x10
[ 35.983874][ C3] ? lock_acquire.part.0+0xe5/0x330
[ 35.984147][ C3] ? process_backlog+0x1ed/0x5e0
[ 35.984426][ C3] __netif_receive_skb_one_core+0xaf/0x1b0
[ 35.984759][ C3] ? __pfx___netif_receive_skb_one_core+0x10/0x10
[ 35.985091][ C3] ? lock_acquire+0x1c1/0x220
[ 35.985367][ C3] ? process_backlog+0x1ed/0x5e0
[ 35.985628][ C3] process_backlog+0xd3/0x5e0
[ 35.985882][ C3] __napi_poll.constprop.0+0xa2/0x460
[ 35.986196][ C3] net_rx_action+0x440/0xb40
[ 35.986447][ C3] ? __pfx_net_rx_action+0x10/0x10
[ 35.986741][ C3] ? hlock_class+0x20/0x130
[ 35.986979][ C3] ? mark_held_locks+0xa5/0xf0
[ 35.987260][ C3] ? hrtimer_interrupt+0x31a/0x770
[ 35.987530][ C3] __do_softirq+0x1b9/0x7ff
[ 35.987795][ C3] ? __dev_queue_xmit+0x6cd/0x17e0
[ 35.988190][ C3] do_softirq+0x4d/0xa0
[ 35.988541][ C3]
[ 35.988770][ C3]
[ 35.988999][ C3] __local_bh_enable_ip+0xf6/0x120
[ 35.989413][ C3] ? __dev_queue_xmit+0x6cd/0x17e0
[ 35.989846][ C3] __dev_queue_xmit+0x6e2/0x17e0
[ 35.990291][ C3] ? mark_lock+0x38/0x3e0
[ 35.990628][ C3] ? mark_lock+0x38/0x3e0
[ 35.991023][ C3] ? mark_held_locks+0xa5/0xf0
[ 35.991427][ C3] ? eth_header+0x158/0x1a0
[ 35.991819][ C3] ? __pfx___dev_queue_xmit+0x10/0x10
[ 35.992307][ C3] ip6_finish_output2+0x4ab/0xf80
[ 35.992719][ C3] ip6_finish_output+0x4ed/0xd30
[ 35.993163][ C3] ip6_output+0x1f3/0x770
[ 35.993525][ C3] ? __pfx_ip6_output+0x10/0x10
[ 35.993941][ C3] ? mark_held_locks+0xa5/0xf0
[ 35.994195][ C3] ? lockdep_hardirqs_on_prepare.part.0+0x151/0x370
[ 35.994585][ C3] NF_HOOK.constprop.0+0xe2/0x680
[ 35.994848][ C3] ? __pfx_NF_HOOK.constprop.0+0x10/0x10
[ 35.995171][ C3] ? __pfx_xfrm_lookup_with_ifid+0x10/0x10
[ 35.995472][ C3] ? lockdep_hardirqs_on_prepare.part.0+0x1b1/0x370
[ 35.995828][ C3] ? icmp6_dst_alloc+0x2c0/0x450
[ 35.996101][ C3] ? __local_bh_enable_ip+0xa6/0x120
[ 35.996378][ C3] mld_sendpack+0x62c/0xbc0
[ 35.996736][ C3] ? __pfx_mld_sendpack+0x10/0x10
[ 35.997076][ C3] ? mld_send_cr+0x3a1/0x780
[ 35.997337][ C3] mld_ifc_work+0x36/0x200
[ 35.997633][ C3] process_one_work+0x78c/0x1310
[ 35.997898][ C3] ? hlock_class+0x4e/0x130
[ 35.998136][ C3] ? __pfx_process_one_work+0x10/0x10
[ 35.998458][ C3] ? assign_work+0x16c/0x240
[ 35.998711][ C3] worker_thread+0x73d/0x1010
[ 35.998970][ C3] ? lockdep_hardirqs_on_prepare.part.0+0x1b1/0x370
[ 35.999327][ C3] ? __pfx_worker_thread+0x10/0x10
[ 35.999613][ C3] ? __pfx_worker_thread+0x10/0x10
[ 35.999901][ C3] kthread+0x28f/0x360
[ 36.000113][ C3] ? __pfx_kthread+0x10/0x10
[ 36.000355][ C3] ret_from_fork+0x31/0x70
[ 36.000617][ C3] ? __pfx_kthread+0x10/0x10
[ 36.000856][ C3] ret_from_fork_asm+0x1b/0x30
[ 36.001143][ C3]
[ 36.001328][ C3] ---[ end trace ]---
# TEST: cfg4: port: ngroups reporting [ OK ]
# TEST: ctl4: port: ngroups reporting [ OK ]
# TEST: cfg6: port: ngroups reporting [ OK ]
# TEST: ctl6: port: ngroups reporting [ OK ]
# TEST: cfg4: port maxgroups: reporting and treatment of 0 [ OK ]
# TEST: cfg4: port maxgroups: configure below ngroups [ OK ]
# TEST: cfg4: port maxgroups: add too many MDB entries [ OK ]
# TEST: ctl4: port maxgroups: reporting and treatment of 0 [ OK ]
# TEST: ctl4: port maxgroups: configure below ngroups [ OK ]
# TEST: ctl4: port maxgroups: add too many MDB entries [ OK ]
# TEST: cfg6: port maxgroups: reporting and treatment of 0 [ OK ]
# TEST: cfg6: port maxgroups: configure below ngroups [ OK ]
# TEST: cfg6: port maxgroups: add too many MDB entries [ OK ]
# TEST: ctl6: port maxgroups: reporting and treatment of 0 [ OK ]
# TEST: ctl6: port maxgroups: configure below ngroups [ OK ]
# TEST: ctl6: port maxgroups: add too many MDB entries [ OK ]
[ 140.579773][ T2078] br0: port 2(veth2) entered disabled state
[ 140.755897][ T2080] veth2: left allmulticast mode
[ 140.756227][ T2080] veth2: left promiscuous mode
[ 140.756635][ T2080] br0: port 2(veth2) entered disabled state
[ 140.935965][ T2081] br0: port 1(veth1) entered disabled state
[ 141.100235][ T2083] veth1: left allmulticast mode
[ 141.100655][ T2083] veth1: left promiscuous mode
[ 141.101274][ T2083] br0: port 1(veth1) entered disabled state
# INFO: 802.1q tests
[ 142.293749][ T2093] br0: port 1(veth1) entered blocking state
[ 142.294141][ T2093] br0: port 1(veth1) entered disabled state
[ 142.294499][ T2093] veth1: entered allmulticast mode
[ 142.296585][ T2093] veth1: entered promiscuous mode
[ 142.473978][ T28] br0: port 1(veth1) entered blocking state
[ 142.474372][ T28] br0: port 1(veth1) entered forwarding state
[ 143.172425][ T2098] br0: port 2(veth2) entered blocking state
[ 143.172781][ T2098] br0: port 2(veth2) entered disabled state
[ 143.173173][ T2098] veth2: entered allmulticast mode
[ 143.175995][ T2098] veth2: entered promiscuous mode
[ 143.335762][ T28] br0: port 2(veth2) entered blocking state
[ 143.336107][ T28] br0: port 2(veth2) entered forwarding state
# TEST: port_vlan: presence of ngroups and maxgroups attributes [ OK ]
# TEST: cfg4: port: ngroups reporting [ OK ]
# TEST: ctl4: port: ngroups reporting [ OK ]
# TEST: cfg6: port: ngroups reporting [ OK ]
# TEST: ctl6: port: ngroups reporting [ OK ]
# TEST: cfg4: port maxgroups: reporting and treatment of 0 [ OK ]
# TEST: cfg4: port maxgroups: configure below ngroups [ OK ]
# TEST: cfg4: port maxgroups: add too many MDB entries [ OK ]
# TEST: ctl4: port maxgroups: reporting and treatment of 0 [ OK ]
# TEST: ctl4: port maxgroups: configure below ngroups [ OK ]
# TEST: ctl4: port maxgroups: add too many MDB entries [ OK ]
# TEST: cfg6: port maxgroups: reporting and treatment of 0 [ OK ]
# TEST: cfg6: port maxgroups: configure below ngroups [ OK ]
# TEST: cfg6: port maxgroups: add too many MDB entries [ OK ]
# TEST: ctl6: port maxgroups: reporting and treatment of 0 [ OK ]
# TEST: ctl6: port maxgroups: configure below ngroups [ OK ]
# TEST: ctl6: port maxgroups: add too many MDB entries [ OK ]
[ 250.608597][ T3873] br0: port 2(veth2) entered disabled state
[ 250.767752][ T3874] veth2: left allmulticast mode
[ 250.768047][ T3874] veth2: left promiscuous mode
[ 250.768457][ T3874] br0: port 2(veth2) entered disabled state
[ 250.928811][ T3875] br0: port 1(veth1) entered disabled state
[ 251.077990][ T3876] veth1: left allmulticast mode
[ 251.079099][ T3876] veth1: left promiscuous mode
[ 251.079508][ T3876] br0: port 1(veth1) entered disabled state
# INFO: 802.1q mcast_vlan_snooping 1 tests
[ 252.201419][ T3886] br0: port 1(veth1) entered blocking state
[ 252.201761][ T3886] br0: port 1(veth1) entered disabled state
[ 252.202112][ T3886] veth1: entered allmulticast mode
[ 252.204871][ T3886] veth1: entered promiscuous mode
[ 252.365848][ T2082] br0: port 1(veth1) entered blocking state
[ 252.366212][ T2082] br0: port 1(veth1) entered forwarding state
[ 253.015317][ T3891] br0: port 2(veth2) entered blocking state
[ 253.015665][ T3891] br0: port 2(veth2) entered disabled state
[ 253.016009][ T3891] veth2: entered allmulticast mode
[ 253.018058][ T3891] veth2: entered promiscuous mode
[ 253.183635][ T2079] br0: port 2(veth2) entered blocking state
[ 253.183990][ T2079] br0: port 2(veth2) entered forwarding state
# TEST: port_vlan: presence of ngroups and maxgroups attributes [ OK ]
# TEST: cfg4: port_vlan: ngroups reporting [ OK ]
# TEST: cfg4: port_vlan: isolation of port and per-VLAN ngroups [ OK ]
# TEST: ctl4: port_vlan: ngroups reporting [ OK ]
# TEST: ctl4: port_vlan: isolation of port and per-VLAN ngroups [ OK ]
# TEST: cfg6: port_vlan: ngroups reporting [ OK ]
# TEST: cfg6: port_vlan: isolation of port and per-VLAN ngroups [ OK ]
# TEST: ctl6: port_vlan: ngroups reporting [ OK ]
# TEST: ctl6: port_vlan: isolation of port and per-VLAN ngroups [ OK ]
# TEST: cfg4: port_vlan maxgroups: reporting and treatment of 0 [ OK ]
# TEST: cfg4: port_vlan maxgroups: isolation of port and per-VLAN maximums [ OK ]
# TEST: cfg4: port_vlan maxgroups: configure below ngroups [ OK ]
# TEST: cfg4: port_vlan maxgroups: add too many MDB entries [ OK ]
# TEST: cfg4: port_vlan maxgroups: isolation of port and per-VLAN ngroups [ OK ]
# TEST: ctl4: port_vlan maxgroups: reporting and treatment of 0 [ OK ]
# TEST: ctl4: port_vlan maxgroups: isolation of port and per-VLAN maximums [ OK ]
# TEST: ctl4: port_vlan maxgroups: configure below ngroups [ OK ]
# TEST: ctl4: port_vlan maxgroups: add too many MDB entries [ OK ]
# TEST: ctl4: port_vlan maxgroups: isolation of port and per-VLAN ngroups [ OK ]
# TEST: cfg6: port_vlan maxgroups: reporting and treatment of 0 [ OK ]
# TEST: cfg6: port_vlan maxgroups: isolation of port and per-VLAN maximums [ OK ]
# TEST: cfg6: port_vlan maxgroups: configure below ngroups [ OK ]
# TEST: cfg6: port_vlan maxgroups: add too many MDB entries [ OK ]
# TEST: cfg6: port_vlan maxgroups: isolation of port and per-VLAN ngroups [ OK ]
# TEST: ctl6: port_vlan maxgroups: reporting and treatment of 0 [ OK ]
# TEST: ctl6: port_vlan maxgroups: isolation of port and per-VLAN maximums [ OK ]
# TEST: ctl6: port_vlan maxgroups: configure below ngroups [ OK ]
# TEST: ctl6: port_vlan maxgroups: add too many MDB entries [ OK ]
# TEST: ctl6: port_vlan maxgroups: isolation of port and per-VLAN ngroups [ OK ]
# TEST: cfg4: port_vlan: temp: mcast_vlan_snooping toggle [ OK ]
# TEST: cfg4: port_vlan: permanent: mcast_vlan_snooping toggle [ OK ]
[ 476.209081][ T7709] br0: port 2(veth2) entered disabled state
[ 476.388263][ T7711] veth2: left allmulticast mode
[ 476.388581][ T7711] veth2: left promiscuous mode
[ 476.389011][ T7711] br0: port 2(veth2) entered disabled state
[ 476.597425][ T7712] br0: port 1(veth1) entered disabled state
[ 476.764756][ T7714] veth1: left allmulticast mode
[ 476.765064][ T7714] veth1: left promiscuous mode
[ 476.766194][ T7714] br0: port 1(veth1) entered disabled state
ok 1 selftests: net/forwarding: bridge_mdb_max.sh
make[1]: Leaving directory '/home/virtme/testing-4/tools/testing/selftests/net/forwarding'
make: Leaving directory '/home/virtme/testing-4/tools/testing/selftests'
xx__-> echo $?
0
xx__->