@@ -615,14 +615,14 @@ static void tcmur_stop_device(void *arg)
 	struct tcmur_device *rdev = tcmu_dev_get_private(dev);
 	bool is_open = false;
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	/* check if this was already called due to thread cancelation */
 	if (rdev->flags & TCMUR_DEV_FLAG_STOPPED) {
-		pthread_mutex_unlock(&rdev->state_lock);
+		pthread_mutex_unlock(&rdev->rdev_lock);
 		return;
 	}
 	rdev->flags |= TCMUR_DEV_FLAG_STOPPING;
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	/*
 	 * The lock thread can fire off the recovery thread, so make sure
@@ -633,19 +633,19 @@ static void tcmur_stop_device(void *arg)
 
 	tcmu_release_dev_lock(dev);
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	if (rdev->flags & TCMUR_DEV_FLAG_IS_OPEN) {
 		rdev->flags &= ~TCMUR_DEV_FLAG_IS_OPEN;
 		is_open = true;
 	}
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	if (is_open)
 		rhandler->close(dev);
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	rdev->flags |= TCMUR_DEV_FLAG_STOPPED;
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	tcmu_dev_dbg(dev, "cmdproc cleanup done\n");
 }
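The change in tcmur_stop_device() is purely mechanical: every flag access now serializes on the renamed rdev_lock instead of state_lock. Note that is_open is only sampled under the lock while rhandler->close() runs after it is dropped, which keeps the handler callback out of the critical section. For readers outside the tree, a minimal sketch of the idempotence guard at the top of the function, with an assumed struct layout and illustrative flag values (the real struct tcmur_device and TCMUR_DEV_FLAG_* definitions live in tcmu-runner's headers):

```c
#include <pthread.h>
#include <stdbool.h>

/* Assumed, simplified view of the fields this hunk touches; the real
 * struct tcmur_device has many more members, and these bit values are
 * illustrative only. */
#define TCMUR_DEV_FLAG_STOPPING (1 << 0)
#define TCMUR_DEV_FLAG_STOPPED  (1 << 1)

struct demo_rdev {
	pthread_mutex_t rdev_lock;	/* formerly state_lock; guards flags */
	unsigned int flags;
};

/* The idempotence guard from the stop path: if a cancelled thread already
 * ran the stop logic, bail out; otherwise publish STOPPING and drop the
 * lock before the slow teardown work. Returns false if already stopped. */
static bool begin_stop(struct demo_rdev *rdev)
{
	pthread_mutex_lock(&rdev->rdev_lock);
	if (rdev->flags & TCMUR_DEV_FLAG_STOPPED) {
		pthread_mutex_unlock(&rdev->rdev_lock);
		return false;
	}
	rdev->flags |= TCMUR_DEV_FLAG_STOPPING;
	pthread_mutex_unlock(&rdev->rdev_lock);
	return true;
}
```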
@@ -872,10 +872,10 @@ static void *tcmur_cmdproc_thread(void *arg)
 		 * requests that LIO has completed. We only need to wait for replies
 		 * for outstanding requests so throttle the cmdproc thread now.
 		 */
-		pthread_mutex_lock(&rdev->state_lock);
+		pthread_mutex_lock(&rdev->rdev_lock);
 		if (rdev->flags & TCMUR_DEV_FLAG_STOPPING)
 			dev_stopping = true;
-		pthread_mutex_unlock(&rdev->state_lock);
+		pthread_mutex_unlock(&rdev->rdev_lock);
 	}
 
 	/*
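Here as well only the lock name changes; the pattern worth calling out is that TCMUR_DEV_FLAG_STOPPING is sampled under rdev_lock into the local dev_stopping, so the cmdproc thread never holds the lock while it waits for outstanding replies. A hedged, self-contained sketch of that sampling loop (process_one_iteration and the parameter shapes are invented stand-ins for the real loop body):

```c
#include <pthread.h>
#include <stdbool.h>

#define TCMUR_DEV_FLAG_STOPPING (1 << 0)	/* illustrative value */

/* Stand-in for the real per-iteration work (handling ring entries, etc.). */
static void process_one_iteration(void)
{
}

/* Hold rdev_lock only long enough to read the flag; do the work unlocked. */
static void cmdproc_loop(pthread_mutex_t *rdev_lock, const unsigned int *flags)
{
	bool dev_stopping = false;

	while (!dev_stopping) {
		process_one_iteration();

		pthread_mutex_lock(rdev_lock);
		if (*flags & TCMUR_DEV_FLAG_STOPPING)
			dev_stopping = true;
		pthread_mutex_unlock(rdev_lock);
	}
}
```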
@@ -1038,15 +1038,15 @@ static int dev_added(struct tcmu_device *dev)
 		goto cleanup_caw_lock;
 	}
 
-	ret = pthread_mutex_init(&rdev->state_lock, NULL);
+	ret = pthread_mutex_init(&rdev->rdev_lock, NULL);
 	if (ret) {
 		ret = -ret;
 		goto cleanup_format_lock;
 	}
 
 	ret = setup_io_work_queue(dev);
 	if (ret < 0)
-		goto cleanup_state_lock;
+		goto cleanup_rdev_lock;
 
 	ret = setup_aio_tracking(rdev);
 	if (ret < 0)
@@ -1088,8 +1088,8 @@ static int dev_added(struct tcmu_device *dev)
 	cleanup_aio_tracking(rdev);
 cleanup_io_work_queue:
 	cleanup_io_work_queue(dev, true);
-cleanup_state_lock:
-	pthread_mutex_destroy(&rdev->state_lock);
+cleanup_rdev_lock:
+	pthread_mutex_destroy(&rdev->rdev_lock);
 cleanup_format_lock:
 	pthread_mutex_destroy(&rdev->format_lock);
 cleanup_caw_lock:
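The rename also follows through dev_added()'s unwind path: each successfully initialized resource gets a cleanup_* label, later failures jump to the label for the most recently acquired resource, and teardown therefore runs in reverse order of setup. A sketch of that goto-unwind shape under stated assumptions (start_workers() is a made-up stand-in for setup_io_work_queue(); the negation reflects pthread_mutex_init() returning a positive errno value on failure):

```c
#include <pthread.h>

/* Made-up stand-in for a later setup step such as setup_io_work_queue(). */
static int start_workers(void)
{
	return 0;	/* pretend it succeeded */
}

static int setup_locks_and_workers(pthread_mutex_t *format_lock,
				   pthread_mutex_t *rdev_lock)
{
	int ret;

	ret = pthread_mutex_init(format_lock, NULL);
	if (ret)
		return -ret;	/* pthread calls return positive errno values */

	ret = pthread_mutex_init(rdev_lock, NULL);
	if (ret) {
		ret = -ret;
		goto cleanup_format_lock;
	}

	ret = start_workers();
	if (ret < 0)
		goto cleanup_rdev_lock;

	return 0;

cleanup_rdev_lock:
	pthread_mutex_destroy(rdev_lock);
cleanup_format_lock:
	pthread_mutex_destroy(format_lock);
	return ret;
}
```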
@@ -1106,9 +1106,9 @@ static void dev_removed(struct tcmu_device *dev)
 	struct tcmur_device *rdev = tcmu_dev_get_private(dev);
 	int ret;
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	rdev->flags |= TCMUR_DEV_FLAG_STOPPING;
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	/*
 	 * The order of cleaning up worker threads and calling ->removed()
@@ -1130,7 +1130,7 @@ static void dev_removed(struct tcmu_device *dev)
 
 	tcmur_destroy_work(rdev->event_work);
 
-	ret = pthread_mutex_destroy(&rdev->state_lock);
+	ret = pthread_mutex_destroy(&rdev->rdev_lock);
 	if (ret != 0)
 		tcmu_err("could not cleanup state lock %d\n", ret);
 
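dev_removed() checks pthread_mutex_destroy()'s return value because destroying a mutex that is still in use can fail (typically with EBUSY) rather than being silently safe. A small standalone illustration of logging that failure the way the tcmu_err() call above does (destroy_lock_logged and the fprintf reporting are hypothetical, not tcmu-runner code):

```c
#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical helper: pthread_mutex_destroy() can fail (e.g. EBUSY when the
 * mutex is still locked or referenced), so report the error instead of
 * assuming success, as dev_removed() does via tcmu_err(). */
static void destroy_lock_logged(pthread_mutex_t *lock, const char *name)
{
	int ret = pthread_mutex_destroy(lock);

	if (ret != 0)
		fprintf(stderr, "could not cleanup %s: %s\n", name, strerror(ret));
}

int main(void)
{
	pthread_mutex_t rdev_lock = PTHREAD_MUTEX_INITIALIZER;

	destroy_lock_logged(&rdev_lock, "rdev_lock");
	return 0;
}
```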