@@ -139,8 +139,9 @@ TEST_P(urEnqueueKernelLaunchTest, InvalidKernelArgs) {
                                          nullptr));
 
     if (backend == UR_PLATFORM_BACKEND_CUDA ||
-        backend == UR_PLATFORM_BACKEND_HIP) {
-        GTEST_FAIL() << "AMD and Nvidia can't check kernel arguments.";
+        backend == UR_PLATFORM_BACKEND_HIP ||
+        backend == UR_PLATFORM_BACKEND_LEVEL_ZERO) {
+        GTEST_FAIL() << "AMD, L0 and Nvidia can't check kernel arguments.";
     }
 
     // Enqueue kernel without setting any args
@@ -561,16 +562,17 @@ TEST_P(urEnqueueKernelLaunchUSMLinkedList, Success) {
     }
 
     // Build linked list with USM allocations
-    ASSERT_SUCCESS(urUSMSharedAlloc(context, device, nullptr, pool,
-                                    sizeof(Node),
+    ur_usm_desc_t desc{UR_STRUCTURE_TYPE_USM_DESC, nullptr, 0};
+    desc.align = alignof(Node);
+    ASSERT_SUCCESS(urUSMSharedAlloc(context, device, &desc, pool, sizeof(Node),
                                     reinterpret_cast<void **>(&list_head)));
     ASSERT_NE(list_head, nullptr);
     Node *list_cur = list_head;
     for (int i = 0; i < num_nodes; i++) {
         list_cur->num = i * 2;
         if (i < num_nodes - 1) {
             ASSERT_SUCCESS(
-                urUSMSharedAlloc(context, device, nullptr, pool, sizeof(Node),
+                urUSMSharedAlloc(context, device, &desc, pool, sizeof(Node),
                                  reinterpret_cast<void **>(&list_cur->next)));
             ASSERT_NE(list_cur->next, nullptr);
         } else {
@@ -579,6 +581,11 @@ TEST_P(urEnqueueKernelLaunchUSMLinkedList, Success) {
         list_cur = list_cur->next;
     }
 
+    ur_bool_t indirect = true;
+    ASSERT_SUCCESS(urKernelSetExecInfo(kernel,
+                                       UR_KERNEL_EXEC_INFO_USM_INDIRECT_ACCESS,
+                                       sizeof(indirect), nullptr, &indirect));
+
     // Run kernel which will iterate the list and modify the values
     ASSERT_SUCCESS(urKernelSetArgPointer(kernel, 0, nullptr, list_head));
     ASSERT_SUCCESS(urEnqueueKernelLaunch(queue, kernel, 1, &global_offset,
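For context, the two patterns these hunks rely on are an explicitly aligned shared-USM allocation and the indirect-access opt-in on the kernel. Below is a minimal sketch of both outside the test harness; the handle parameters and the setup_indirect_list_access helper name are assumptions for illustration, and the ASSERT_SUCCESS error checking from the test is elided.

#include <ur_api.h>

struct Node {
    int num;
    Node *next;
};

void setup_indirect_list_access(ur_context_handle_t context,
                                ur_device_handle_t device,
                                ur_usm_pool_handle_t pool,
                                ur_kernel_handle_t kernel, Node **head) {
    // Request alignof(Node) explicitly; passing a null descriptor instead
    // leaves the alignment choice entirely to the implementation.
    ur_usm_desc_t desc{UR_STRUCTURE_TYPE_USM_DESC, nullptr, 0, alignof(Node)};
    urUSMSharedAlloc(context, device, &desc, pool, sizeof(Node),
                     reinterpret_cast<void **>(head));

    // Opt the kernel into indirect USM access: the list nodes are reached by
    // chasing ->next pointers rather than being passed as kernel arguments,
    // so the runtime must keep all USM allocations accessible to the kernel.
    ur_bool_t indirect = true;
    urKernelSetExecInfo(kernel, UR_KERNEL_EXEC_INFO_USM_INDIRECT_ACCESS,
                        sizeof(indirect), nullptr, &indirect);
}

The exec-info property is set on the kernel object itself, so setting it once before the first launch should cover subsequent enqueues of the same kernel.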