@@ -3233,13 +3233,7 @@ static void _slurm_rpc_request_control(slurm_msg_t *msg)
 /* _slurm_rpc_shutdown_controller - process RPC to shutdown slurmctld */
 static void _slurm_rpc_shutdown_controller(slurm_msg_t *msg)
 {
-        int error_code = SLURM_SUCCESS;
-        slurmctld_shutdown_type_t options = SLURMCTLD_SHUTDOWN_ALL;
-        time_t now = time(NULL);
         shutdown_msg_t *shutdown_msg = msg->data;
-        /* Locks: Read node */
-        slurmctld_lock_t node_read_lock = {
-                NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };

         if (!validate_super_user(msg->auth_uid)) {
                 error("Security violation, SHUTDOWN RPC from uid=%u",
@@ -3248,23 +3242,13 @@ static void _slurm_rpc_shutdown_controller(slurm_msg_t *msg)
                 return;
         }

-        if (msg->msg_type == REQUEST_CONTROL) {
-                info("Performing RPC: REQUEST_CONTROL");
-                slurm_mutex_lock(&slurmctld_config.backup_finish_lock);
-                /* resume backup mode */
-                slurmctld_config.resume_backup = true;
-        } else {
-                info("Performing RPC: REQUEST_SHUTDOWN");
-                options = shutdown_msg->options;
-        }
+        info("Performing RPC: REQUEST_SHUTDOWN");

-        /* do RPC call */
         if (slurmctld_config.shutdown_time)
                 debug2("shutdown RPC issued when already in progress");
         else {
-                if ((msg->msg_type == REQUEST_SHUTDOWN) &&
-                    (options == SLURMCTLD_SHUTDOWN_ALL)) {
-                        /* This means (msg->msg_type != REQUEST_CONTROL) */
+                if (shutdown_msg->options == SLURMCTLD_SHUTDOWN_ALL) {
+                        slurmctld_lock_t node_read_lock = { .node = READ_LOCK };
                         lock_slurmctld(node_read_lock);
                         msg_to_slurmd(REQUEST_SHUTDOWN);
                         unlock_slurmctld(node_read_lock);
@@ -3273,40 +3257,7 @@ static void _slurm_rpc_shutdown_controller(slurm_msg_t *msg)
                 pthread_kill(pthread_self(), SIGTERM);
         }

-        if (msg->msg_type == REQUEST_CONTROL) {
-                struct timespec ts = {0, 0};
-
-                /* save_all_state(); performed by _slurmctld_background */
-
-                /*
-                 * Wait for the backup to dump state and finish up everything.
-                 * This should happen in _slurmctld_background and then release
-                 * once we know for sure we are in backup mode in run_backup().
-                 * Here we will wait CONTROL_TIMEOUT - 1 before we reply.
-                 */
-                ts.tv_sec = now + CONTROL_TIMEOUT - 1;
-
-                slurm_cond_timedwait(&slurmctld_config.backup_finish_cond,
-                                     &slurmctld_config.backup_finish_lock,
-                                     &ts);
-                slurm_mutex_unlock(&slurmctld_config.backup_finish_lock);
-
-                /*
-                 * jobcomp/elasticsearch saves/loads the state to/from file
-                 * elasticsearch_state. Since the jobcomp API isn't designed
-                 * with save/load state operations, the jobcomp/elasticsearch
-                 * _save_state() is highly coupled to its fini() function. This
-                 * state doesn't follow the same execution path as the rest of
-                 * Slurm states, where in save_all_sate() they are all indepen-
-                 * dently scheduled. So we save it manually here.
-                 */
-                jobcomp_g_fini();
-
-                if (slurmctld_config.resume_backup)
-                        error("%s: REQUEST_CONTROL reply but backup not completely done relinquishing control. Old state possible", __func__);
-        }
-
-        slurm_send_rc_msg(msg, error_code);
+        slurm_send_rc_msg(msg, SLURM_SUCCESS);
 }

 static int _foreach_step_match_containerid(void *x, void *arg)
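
Note: the added line in the second hunk, slurmctld_lock_t node_read_lock = { .node = READ_LOCK };, swaps the old positional initializer for a C99 designated initializer, so only the node lock level is named and every other field defaults to zero (NO_LOCK). Below is a minimal standalone sketch of that pattern; the struct is only a stand-in with assumed field names, not Slurm's real slurmctld_lock_t definition.

#include <stdio.h>

/* Hypothetical stand-in for Slurm's lock description; the field names and
 * order here are assumptions for illustration only. */
typedef enum { NO_LOCK = 0, READ_LOCK, WRITE_LOCK } lock_level_t;

typedef struct {
        lock_level_t conf;
        lock_level_t job;
        lock_level_t node;
        lock_level_t part;
        lock_level_t fed;
} demo_lock_t;

int main(void)
{
        /* Old style: positional, every field spelled out in order. */
        demo_lock_t positional = { NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };

        /* New style: designated initializer; unnamed fields default to 0 (NO_LOCK). */
        demo_lock_t designated = { .node = READ_LOCK };

        printf("positional.node=%d designated.node=%d designated.job=%d\n",
               positional.node, designated.node, designated.job);
        return 0;
}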
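
The removed REQUEST_CONTROL branch in the third hunk blocked on slurmctld_config.backup_finish_cond for up to CONTROL_TIMEOUT - 1 seconds before replying, using Slurm's slurm_cond_timedwait wrapper with the mutex taken earlier in the function. The following is a rough sketch of that timed-wait handoff pattern written with plain pthreads; the names, timeout value, and predicate flag are placeholders rather than Slurm's own.

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

#define DEMO_TIMEOUT 10 /* placeholder for CONTROL_TIMEOUT */

static pthread_mutex_t finish_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t finish_cond = PTHREAD_COND_INITIALIZER;
static bool handoff_done = false;

/* Wait for the other side to signal completion, or give up once the absolute
 * deadline passes; pthread_cond_timedwait() takes a CLOCK_REALTIME timespec. */
static void wait_for_handoff(void)
{
        struct timespec ts = { .tv_sec = time(NULL) + DEMO_TIMEOUT - 1 };

        pthread_mutex_lock(&finish_lock);
        while (!handoff_done) {
                if (pthread_cond_timedwait(&finish_cond, &finish_lock, &ts))
                        break; /* ETIMEDOUT (or another error): stop waiting */
        }
        pthread_mutex_unlock(&finish_lock);
}

/* Called from the other thread once the handoff has actually finished. */
static void signal_handoff_done(void)
{
        pthread_mutex_lock(&finish_lock);
        handoff_done = true;
        pthread_cond_signal(&finish_cond);
        pthread_mutex_unlock(&finish_lock);
}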