@@ -8,8 +8,10 @@
 #include <linux/completion.h>
 #include <linux/debugfs.h>
 #include <linux/device.h>
+#include <linux/hrtimer.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/power_supply.h>
@@ -28,7 +30,8 @@
 #include <linux/usb/role.h>
 #include <linux/usb/tcpm.h>
 #include <linux/usb/typec_altmode.h>
-#include <linux/workqueue.h>
+
+#include <uapi/linux/sched/types.h>
 
 #define FOREACH_STATE(S) \
         S(INVALID_STATE), \
@@ -203,7 +206,7 @@ struct tcpm_port {
         struct device *dev;
 
         struct mutex lock;              /* tcpm state machine lock */
-        struct workqueue_struct *wq;
+        struct kthread_worker *wq;
 
         struct typec_capability typec_caps;
         struct typec_port *typec_port;
@@ -247,15 +250,17 @@ struct tcpm_port {
         enum tcpm_state prev_state;
         enum tcpm_state state;
         enum tcpm_state delayed_state;
-        unsigned long delayed_runtime;
+        ktime_t delayed_runtime;
         unsigned long delay_ms;
 
         spinlock_t pd_event_lock;
         u32 pd_events;
 
-        struct work_struct event_work;
-        struct delayed_work state_machine;
-        struct delayed_work vdm_state_machine;
+        struct kthread_work event_work;
+        struct hrtimer state_machine_timer;
+        struct kthread_work state_machine;
+        struct hrtimer vdm_state_machine_timer;
+        struct kthread_work vdm_state_machine;
         bool state_machine_running;
 
         struct completion tx_complete;
@@ -340,7 +345,7 @@ struct tcpm_port {
 };
 
 struct pd_rx_event {
-        struct work_struct work;
+        struct kthread_work work;
         struct tcpm_port *port;
         struct pd_message msg;
 };
@@ -914,6 +919,27 @@ static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
         return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
 }
 
+static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
+{
+        if (delay_ms) {
+                hrtimer_start(&port->state_machine_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
+        } else {
+                hrtimer_cancel(&port->state_machine_timer);
+                kthread_queue_work(port->wq, &port->state_machine);
+        }
+}
+
+static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
+{
+        if (delay_ms) {
+                hrtimer_start(&port->vdm_state_machine_timer, ms_to_ktime(delay_ms),
+                              HRTIMER_MODE_REL);
+        } else {
+                hrtimer_cancel(&port->vdm_state_machine_timer);
+                kthread_queue_work(port->wq, &port->vdm_state_machine);
+        }
+}
+
 static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
                            unsigned int delay_ms)
 {
@@ -922,9 +948,8 @@ static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
                          tcpm_states[port->state], tcpm_states[state],
                          delay_ms);
                 port->delayed_state = state;
-                mod_delayed_work(port->wq, &port->state_machine,
-                                 msecs_to_jiffies(delay_ms));
-                port->delayed_runtime = jiffies + msecs_to_jiffies(delay_ms);
+                mod_tcpm_delayed_work(port, delay_ms);
+                port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms));
                 port->delay_ms = delay_ms;
         } else {
                 tcpm_log(port, "state change %s -> %s",
@@ -939,7 +964,7 @@ static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
                  * machine.
                  */
                 if (!port->state_machine_running)
-                        mod_delayed_work(port->wq, &port->state_machine, 0);
+                        mod_tcpm_delayed_work(port, 0);
         }
 }
 
@@ -960,7 +985,7 @@ static void tcpm_queue_message(struct tcpm_port *port,
                                enum pd_msg_request message)
 {
         port->queued_message = message;
-        mod_delayed_work(port->wq, &port->state_machine, 0);
+        mod_tcpm_delayed_work(port, 0);
 }
 
 /*
@@ -981,7 +1006,7 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
         port->vdm_retries = 0;
         port->vdm_state = VDM_STATE_READY;
 
-        mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
+        mod_vdm_delayed_work(port, 0);
 }
 
 static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
@@ -1244,8 +1269,7 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
                         port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
                         port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) |
                                 CMDT_INIT;
-                        mod_delayed_work(port->wq, &port->vdm_state_machine,
-                                         msecs_to_jiffies(PD_T_VDM_BUSY));
+                        mod_vdm_delayed_work(port, PD_T_VDM_BUSY);
                         return;
                 }
                 port->vdm_state = VDM_STATE_DONE;
@@ -1390,8 +1414,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
                         port->vdm_retries = 0;
                         port->vdm_state = VDM_STATE_BUSY;
                         timeout = vdm_ready_timeout(port->vdo_data[0]);
-                        mod_delayed_work(port->wq, &port->vdm_state_machine,
-                                         timeout);
+                        mod_vdm_delayed_work(port, timeout);
                 }
                 break;
         case VDM_STATE_WAIT_RSP_BUSY:
@@ -1420,10 +1443,9 @@ static void vdm_run_state_machine(struct tcpm_port *port)
         }
 }
 
-static void vdm_state_machine_work(struct work_struct *work)
+static void vdm_state_machine_work(struct kthread_work *work)
 {
-        struct tcpm_port *port = container_of(work, struct tcpm_port,
-                                              vdm_state_machine.work);
+        struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine);
         enum vdm_states prev_state;
 
         mutex_lock(&port->lock);
@@ -1591,6 +1613,7 @@ static int tcpm_altmode_vdm(struct typec_altmode *altmode,
         struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
 
         tcpm_queue_vdm_unlocked(port, header, data, count - 1);
+
         return 0;
 }
 
@@ -2005,7 +2028,7 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
         }
 }
 
-static void tcpm_pd_rx_handler(struct work_struct *work)
+static void tcpm_pd_rx_handler(struct kthread_work *work)
 {
         struct pd_rx_event *event = container_of(work,
                                                  struct pd_rx_event, work);
@@ -2067,10 +2090,10 @@ void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
         if (!event)
                 return;
 
-        INIT_WORK(&event->work, tcpm_pd_rx_handler);
+        kthread_init_work(&event->work, tcpm_pd_rx_handler);
         event->port = port;
         memcpy(&event->msg, msg, sizeof(*msg));
-        queue_work(port->wq, &event->work);
+        kthread_queue_work(port->wq, &event->work);
 }
 EXPORT_SYMBOL_GPL(tcpm_pd_receive);
 
@@ -2123,9 +2146,9 @@ static bool tcpm_send_queued_message(struct tcpm_port *port)
         } while (port->queued_message != PD_MSG_NONE);
 
         if (port->delayed_state != INVALID_STATE) {
-                if (time_is_after_jiffies(port->delayed_runtime)) {
-                        mod_delayed_work(port->wq, &port->state_machine,
-                                         port->delayed_runtime - jiffies);
+                if (ktime_after(port->delayed_runtime, ktime_get())) {
+                        mod_tcpm_delayed_work(port, ktime_to_ms(ktime_sub(port->delayed_runtime,
+                                                                          ktime_get())));
                         return true;
                 }
                 port->delayed_state = INVALID_STATE;
@@ -3258,10 +3281,9 @@ static void run_state_machine(struct tcpm_port *port)
         case SNK_DISCOVERY_DEBOUNCE_DONE:
                 if (!tcpm_port_is_disconnected(port) &&
                     tcpm_port_is_sink(port) &&
-                    time_is_after_jiffies(port->delayed_runtime)) {
+                    ktime_after(port->delayed_runtime, ktime_get())) {
                         tcpm_set_state(port, SNK_DISCOVERY,
-                                       jiffies_to_msecs(port->delayed_runtime -
-                                                        jiffies));
+                                       ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get())));
                         break;
                 }
                 tcpm_set_state(port, unattached_state(port), 0);
@@ -3656,10 +3678,9 @@ static void run_state_machine(struct tcpm_port *port)
         }
 }
 
-static void tcpm_state_machine_work(struct work_struct *work)
+static void tcpm_state_machine_work(struct kthread_work *work)
 {
-        struct tcpm_port *port = container_of(work, struct tcpm_port,
-                                              state_machine.work);
+        struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine);
         enum tcpm_state prev_state;
 
         mutex_lock(&port->lock);
@@ -4019,7 +4040,7 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port)
                                0);
 }
 
-static void tcpm_pd_event_handler(struct work_struct *work)
+static void tcpm_pd_event_handler(struct kthread_work *work)
 {
         struct tcpm_port *port = container_of(work, struct tcpm_port,
                                               event_work);
@@ -4060,7 +4081,7 @@ void tcpm_cc_change(struct tcpm_port *port)
         spin_lock(&port->pd_event_lock);
         port->pd_events |= TCPM_CC_EVENT;
         spin_unlock(&port->pd_event_lock);
-        queue_work(port->wq, &port->event_work);
+        kthread_queue_work(port->wq, &port->event_work);
 }
 EXPORT_SYMBOL_GPL(tcpm_cc_change);
 
@@ -4069,7 +4090,7 @@ void tcpm_vbus_change(struct tcpm_port *port)
         spin_lock(&port->pd_event_lock);
         port->pd_events |= TCPM_VBUS_EVENT;
         spin_unlock(&port->pd_event_lock);
-        queue_work(port->wq, &port->event_work);
+        kthread_queue_work(port->wq, &port->event_work);
 }
 EXPORT_SYMBOL_GPL(tcpm_vbus_change);
 
@@ -4078,7 +4099,7 @@ void tcpm_pd_hard_reset(struct tcpm_port *port)
         spin_lock(&port->pd_event_lock);
         port->pd_events = TCPM_RESET_EVENT;
         spin_unlock(&port->pd_event_lock);
-        queue_work(port->wq, &port->event_work);
+        kthread_queue_work(port->wq, &port->event_work);
 }
 EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
 
@@ -4786,6 +4807,22 @@ static int devm_tcpm_psy_register(struct tcpm_port *port)
         return PTR_ERR_OR_ZERO(port->psy);
 }
 
+static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
+{
+        struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);
+
+        kthread_queue_work(port->wq, &port->state_machine);
+        return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer)
+{
+        struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);
+
+        kthread_queue_work(port->wq, &port->vdm_state_machine);
+        return HRTIMER_NORESTART;
+}
+
 struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
 {
         struct tcpm_port *port;
@@ -4807,12 +4844,18 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
         mutex_init(&port->lock);
         mutex_init(&port->swap_lock);
 
-        port->wq = create_singlethread_workqueue(dev_name(dev));
-        if (!port->wq)
-                return ERR_PTR(-ENOMEM);
-        INIT_DELAYED_WORK(&port->state_machine, tcpm_state_machine_work);
-        INIT_DELAYED_WORK(&port->vdm_state_machine, vdm_state_machine_work);
-        INIT_WORK(&port->event_work, tcpm_pd_event_handler);
+        port->wq = kthread_create_worker(0, dev_name(dev));
+        if (IS_ERR(port->wq))
+                return ERR_CAST(port->wq);
+        sched_set_fifo(port->wq->task);
+
+        kthread_init_work(&port->state_machine, tcpm_state_machine_work);
+        kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
+        kthread_init_work(&port->event_work, tcpm_pd_event_handler);
+        hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+        port->state_machine_timer.function = state_machine_timer_handler;
+        hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+        port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
 
         spin_lock_init(&port->pd_event_lock);
 
@@ -4864,7 +4907,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
         usb_role_switch_put(port->role_sw);
 out_destroy_wq:
         tcpm_debugfs_exit(port);
-        destroy_workqueue(port->wq);
+        kthread_destroy_worker(port->wq);
         return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(tcpm_register_port);
@@ -4879,7 +4922,7 @@ void tcpm_unregister_port(struct tcpm_port *port)
         typec_unregister_port(port->typec_port);
         usb_role_switch_put(port->role_sw);
         tcpm_debugfs_exit(port);
-        destroy_workqueue(port->wq);
+        kthread_destroy_worker(port->wq);
 }
 EXPORT_SYMBOL_GPL(tcpm_unregister_port);
 
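The conversion above reduces to one recurring pattern: an hrtimer callback runs in hard-irq context and therefore does nothing except hand the real work to a dedicated kernel thread, which the driver promotes to SCHED_FIFO so PD protocol deadlines are less likely to be missed under scheduler load. The standalone module below is a minimal sketch of that pattern, not code from this commit; the demo_* names and the 100 ms delay are illustrative only.

#include <linux/hrtimer.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct kthread_worker *demo_worker;
static struct kthread_work demo_work;
static struct hrtimer demo_timer;

/* Process context: sleeping and taking mutexes are allowed here. */
static void demo_work_fn(struct kthread_work *work)
{
        pr_info("demo: deferred work ran\n");
}

/*
 * Hard-irq context: only queue the work, mirroring the
 * state_machine_timer_handler() added by this commit.
 */
static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
{
        kthread_queue_work(demo_worker, &demo_work);
        return HRTIMER_NORESTART;
}

static int __init demo_init(void)
{
        demo_worker = kthread_create_worker(0, "demo-worker");
        if (IS_ERR(demo_worker))
                return PTR_ERR(demo_worker);
        sched_set_fifo(demo_worker->task);      /* promote worker to RT priority */

        kthread_init_work(&demo_work, demo_work_fn);
        hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        demo_timer.function = demo_timer_fn;
        hrtimer_start(&demo_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
        return 0;
}

static void __exit demo_exit(void)
{
        hrtimer_cancel(&demo_timer);
        kthread_destroy_worker(demo_worker);    /* flushes any queued work */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note also the zero-delay path in mod_tcpm_delayed_work() above: when delay_ms is 0, the driver cancels any armed timer and queues the work directly, rather than bouncing a zero-length timeout through the interrupt path.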