@@ -681,6 +681,170 @@ int test_dynptr_copy_xdp(struct xdp_md *xdp)
681
681
return XDP_DROP ;
682
682
}
683
683
684
+ char memset_zero_data [] = "data to be zeroed" ;
685
+
686
+ SEC ("?tp/syscalls/sys_enter_nanosleep" )
687
+ int test_dynptr_memset_zero (void * ctx )
688
+ {
689
+ __u32 data_sz = sizeof (memset_zero_data );
690
+ char zeroes [32 ] = {'\0' };
691
+ struct bpf_dynptr ptr ;
692
+
693
+ err = bpf_dynptr_from_mem (memset_zero_data , data_sz , 0 , & ptr );
694
+ err = err ?: bpf_dynptr_memset (& ptr , 0 , data_sz , 0 );
695
+ err = err ?: bpf_memcmp (zeroes , memset_zero_data , data_sz );
696
+
697
+ return 0 ;
698
+ }
699
+
700
+ #define DYNPTR_MEMSET_VAL 42
701
+
702
+ char memset_notzero_data [] = "data to be overwritten" ;
703
+
704
+ SEC ("?tp/syscalls/sys_enter_nanosleep" )
705
+ int test_dynptr_memset_notzero (void * ctx )
706
+ {
707
+ u32 data_sz = sizeof (memset_notzero_data );
708
+ struct bpf_dynptr ptr ;
709
+ char expected [32 ];
710
+
711
+ memset (expected , DYNPTR_MEMSET_VAL , data_sz );
712
+
713
+ err = bpf_dynptr_from_mem (memset_notzero_data , data_sz , 0 , & ptr );
714
+ err = err ?: bpf_dynptr_memset (& ptr , 0 , data_sz , DYNPTR_MEMSET_VAL );
715
+ err = err ?: bpf_memcmp (expected , memset_notzero_data , data_sz );
716
+
717
+ return 0 ;
718
+ }
719
+
720
+ char memset_zero_offset_data [] = "data to be zeroed partially" ;
721
+
722
+ SEC ("?tp/syscalls/sys_enter_nanosleep" )
723
+ int test_dynptr_memset_zero_offset (void * ctx )
724
+ {
725
+ char expected [] = "data to \0\0\0\0eroed partially" ;
726
+ __u32 data_sz = sizeof (memset_zero_offset_data );
727
+ struct bpf_dynptr ptr ;
728
+
729
+ err = bpf_dynptr_from_mem (memset_zero_offset_data , data_sz , 0 , & ptr );
730
+ err = err ?: bpf_dynptr_memset (& ptr , 8 , 4 , 0 );
731
+ err = err ?: bpf_memcmp (expected , memset_zero_offset_data , data_sz );
732
+
733
+ return 0 ;
734
+ }
735
+
736
+ char memset_zero_adjusted_data [] = "data to be zeroed partially" ;
737
+
738
+ SEC ("?tp/syscalls/sys_enter_nanosleep" )
739
+ int test_dynptr_memset_zero_adjusted (void * ctx )
740
+ {
741
+ char expected [] = "data\0\0\0\0be zeroed partially" ;
742
+ __u32 data_sz = sizeof (memset_zero_adjusted_data );
743
+ struct bpf_dynptr ptr ;
744
+
745
+ err = bpf_dynptr_from_mem (memset_zero_adjusted_data , data_sz , 0 , & ptr );
746
+ err = err ?: bpf_dynptr_adjust (& ptr , 4 , 8 );
747
+ err = err ?: bpf_dynptr_memset (& ptr , 0 , bpf_dynptr_size (& ptr ), 0 );
748
+ err = err ?: bpf_memcmp (expected , memset_zero_adjusted_data , data_sz );
749
+
750
+ return 0 ;
751
+ }
752
+
753
+ char memset_overflow_data [] = "memset overflow data" ;
754
+
755
+ SEC ("?tp/syscalls/sys_enter_nanosleep" )
756
+ int test_dynptr_memset_overflow (void * ctx )
757
+ {
758
+ __u32 data_sz = sizeof (memset_overflow_data );
759
+ struct bpf_dynptr ptr ;
760
+ int ret ;
761
+
762
+ err = bpf_dynptr_from_mem (memset_overflow_data , data_sz , 0 , & ptr );
763
+ ret = bpf_dynptr_memset (& ptr , 0 , data_sz + 1 , 0 );
764
+ if (ret != - E2BIG )
765
+ err = 1 ;
766
+
767
+ return 0 ;
768
+ }
769
+
770
+ SEC ("?tp/syscalls/sys_enter_nanosleep" )
771
+ int test_dynptr_memset_overflow_offset (void * ctx )
772
+ {
773
+ __u32 data_sz = sizeof (memset_overflow_data );
774
+ struct bpf_dynptr ptr ;
775
+ int ret ;
776
+
777
+ err = bpf_dynptr_from_mem (memset_overflow_data , data_sz , 0 , & ptr );
778
+ ret = bpf_dynptr_memset (& ptr , 1 , data_sz , 0 );
779
+ if (ret != - E2BIG )
780
+ err = 1 ;
781
+
782
+ return 0 ;
783
+ }
784
+
785
+ SEC ("?cgroup_skb/egress" )
786
+ int test_dynptr_memset_readonly (struct __sk_buff * skb )
787
+ {
788
+ struct bpf_dynptr ptr ;
789
+ int ret ;
790
+
791
+ err = bpf_dynptr_from_skb (skb , 0 , & ptr );
792
+
793
+ /* cgroup skbs are read only, memset should fail */
794
+ ret = bpf_dynptr_memset (& ptr , 0 , bpf_dynptr_size (& ptr ), 0 );
795
+ if (ret != - EINVAL )
796
+ err = 1 ;
797
+
798
+ return 0 ;
799
+ }
800
+
801
/* Fill an entire (possibly multi-buffer) XDP packet via bpf_dynptr_memset,
 * then verify the fill by reading it back in 32-byte chunks.  Reading past
 * the end yields -E2BIG, which is the signal to switch to the tail path.
 * Result is reported through the global 'err'; the packet is always dropped.
 */
SEC("xdp")
int test_dynptr_memset_xdp_chunks(struct xdp_md *xdp)
{
	/* Upper bound on verification loop iterations (verifier-friendly
	 * cap); packets larger than max_chunks * 32 bytes are only
	 * partially verified.
	 */
	const int max_chunks = 200;
	struct bpf_dynptr ptr_xdp;
	char expected_buf[32];
	u32 data_sz, offset;
	char buf[32];
	int i;

	__builtin_memset(expected_buf, DYNPTR_MEMSET_VAL, sizeof(expected_buf));

	/* ptr_xdp is backed by non-contiguous memory */
	bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
	data_sz = bpf_dynptr_size(&ptr_xdp);

	/* Fill the whole packet in one call; this is the operation under test. */
	err = bpf_dynptr_memset(&ptr_xdp, 0, data_sz, DYNPTR_MEMSET_VAL);
	if (err)
		goto out;

	/* Verify full 32-byte chunks until the read runs off the end. */
	bpf_for(i, 0, max_chunks) {
		offset = i * sizeof(buf);
		err = bpf_dynptr_read(&buf, sizeof(buf), &ptr_xdp, offset, 0);
		switch (err) {
		case 0:
			break;
		case -E2BIG:
			/* Fewer than sizeof(buf) bytes remain at 'offset'. */
			goto handle_tail;
		default:
			goto out;
		}
		err = bpf_memcmp(buf, expected_buf, sizeof(buf));
		if (err)
			goto out;
	}

handle_tail:
	/* Verify the partial tail chunk, if any.  NOTE(review): data_sz and
	 * offset are unsigned, so this guard also relies on offset never
	 * exceeding data_sz by more than it can underflow past sizeof(buf)
	 * when the loop exhausts max_chunks on a large packet — confirm the
	 * intended behavior for packets longer than max_chunks * 32 bytes.
	 */
	if (data_sz - offset < sizeof(buf)) {
		err = bpf_dynptr_read(&buf, data_sz - offset, &ptr_xdp, offset, 0);
		if (err)
			goto out;
		err = bpf_memcmp(buf, expected_buf, data_sz - offset);
	}
out:
	return XDP_DROP;
}
847
+
684
848
void * user_ptr ;
685
849
/* Contains the copy of the data pointed by user_ptr.
686
850
* Size 384 to make it not fit into a single kernel chunk when copying
0 commit comments