}
dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
/ tb_ticks_per_sec;
- hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
- HRTIMER_MODE_REL);
+ hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
vcpu->arch.timer_running = 1;
}
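For context, the reason each of these substitutions is safe: ktime_t is now a plain s64 count of nanoseconds, so ktime_set() with a zero seconds argument is an identity operation. A minimal sketch of the relevant definitions, paraphrased from <linux/ktime.h> (the KTIME_SEC_MAX check guards the seconds-to-nanoseconds multiplication against overflow):

typedef s64 ktime_t;	/* nanoseconds */

static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
{
	if (unlikely(secs >= KTIME_SEC_MAX))
		return KTIME_MAX;
	return secs * NSEC_PER_SEC + (s64)nsecs;
}

/* ktime_set(0, nsec) therefore evaluates to (s64)nsec, so every
 * ktime_set(0, x) in the hunks below can become plain x. */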
smp_wmb(); /* ensure spu event buffer updates are written */
/* don't want events intermingled... */
- kt = ktime_set(0, profiling_interval);
+ kt = profiling_interval;
if (!spu_prof_running)
goto stop;
hrtimer_forward(timer, timer->base->get_time(), kt);
ktime_t kt;
pr_debug("timer resolution: %lu\n", TICK_NSEC);
- kt = ktime_set(0, profiling_interval);
+ kt = profiling_interval;
hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_set_expires(&timer, kt);
timer.function = profile_spus;
return 0;
__set_cpu_idle(vcpu);
- hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
+ hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
now = ktime_get();
remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
if (ktime_to_ns(remaining) < 0)
- remaining = ktime_set(0, 0);
+ remaining = 0;
ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
tmcct = div64_u64(ns,
apic->lapic_timer.tscdeadline = 0;
if (apic_lvtt_oneshot(apic)) {
apic->lapic_timer.tscdeadline = 0;
- apic->lapic_timer.target_expiration = ktime_set(0, 0);
+ apic->lapic_timer.target_expiration = 0;
}
atomic_set(&apic->lapic_timer.pending, 0);
}
* This will be replaced with the stats tracking code, using
* 'avg_completion_time / 2' as the pre-sleep target.
*/
- kt = ktime_set(0, nsecs);
+ kt = nsecs;
mode = HRTIMER_MODE_REL;
hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
static ktime_t initcall_debug_start(struct device *dev)
{
- ktime_t calltime = ktime_set(0, 0);
+ ktime_t calltime = 0;
if (pm_print_times_enabled) {
pr_info("calling %s+ @ %i, parent: %s\n",
prevent_sleep_time = ktime_add(prevent_sleep_time,
ktime_sub(now, ws->start_prevent_time));
} else {
- active_time = ktime_set(0, 0);
+ active_time = 0;
}
seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
- ktime_t kt = ktime_set(0, completion_nsec);
+ ktime_t kt = completion_nsec;
hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}
int dst_cnt;
int i;
ktime_t ktime, start, diff;
- ktime_t filltime = ktime_set(0, 0);
- ktime_t comparetime = ktime_set(0, 0);
+ ktime_t filltime = 0;
+ ktime_t comparetime = 0;
s64 runtime = 0;
unsigned long long total_len = 0;
u8 align = 0;
drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
- hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD),
+ hrtimer_start(vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD,
HRTIMER_MODE_REL);
return HRTIMER_NORESTART;
hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer,
- ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
+ DCE_VIRTUAL_VBLANK_PERIOD);
adev->mode_info.crtcs[crtc]->vblank_timer.function =
dce_virtual_vblank_timer_handle;
hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer,
- ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+ DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL);
} else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
DRM_DEBUG("Disable software vsync timer\n");
hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
{
d->wake_count++;
hrtimer_start_range_ns(&d->timer,
- ktime_set(0, NSEC_PER_MSEC),
+ NSEC_PER_MSEC,
NSEC_PER_MSEC,
HRTIMER_MODE_REL);
}
__set_current_state(intr ? TASK_INTERRUPTIBLE :
TASK_UNINTERRUPTIBLE);
- kt = ktime_set(0, sleep_time);
+ kt = sleep_time;
schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
sleep_time *= 2;
if (sleep_time > NSEC_PER_MSEC)
}
drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
- tilcdc_crtc->last_vblank = ktime_set(0, 0);
+ tilcdc_crtc->last_vblank = 0;
tilcdc_crtc->enabled = false;
mutex_unlock(&tilcdc_crtc->enable_lock);
return -EINVAL;
info->sampling_frequency = val;
- info->period = ktime_set(0, NSEC_PER_SEC / val);
+ info->period = NSEC_PER_SEC / val;
return len;
}
trig_info->timer.function = iio_hrtimer_trig_handler;
trig_info->sampling_frequency = HRTIMER_DEFAULT_SAMPLING_FREQUENCY;
- trig_info->period = ktime_set(0, NSEC_PER_SEC /
- trig_info->sampling_frequency);
+ trig_info->period = NSEC_PER_SEC / trig_info->sampling_frequency;
ret = iio_trigger_register(trig_info->swt.trigger);
if (ret)
RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. */
w->counter = 0;
- hrtimer_start(&w->timer, ktime_set(0, BIN_SAMPLE), HRTIMER_MODE_REL);
+ hrtimer_start(&w->timer, BIN_SAMPLE, HRTIMER_MODE_REL);
}
static enum hrtimer_restart timer_handler(struct hrtimer
if (!err && (chan->txdone_method & TXDONE_BY_POLL))
/* kick start the timer immediately to avoid delays */
- hrtimer_start(&chan->mbox->poll_hrt, ktime_set(0, 0),
- HRTIMER_MODE_REL);
+ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
}
static void tx_tick(struct mbox_chan *chan, int r)
struct dmxdev_filter *filter,
struct dmxdev_feed *feed)
{
- ktime_t timeout = ktime_set(0, 0);
+ ktime_t timeout = 0;
struct dmx_pes_filter_params *para = &filter->params.pes;
dmx_output_t otype;
int ret;
struct cx88_IR *ir = container_of(timer, struct cx88_IR, timer);
cx88_ir_handle_key(ir);
- missed = hrtimer_forward_now(&ir->timer,
- ktime_set(0, ir->polling * 1000000));
+ missed = hrtimer_forward_now(&ir->timer, ir->polling * 1000000);
if (missed > 1)
ir_dprintk("Missed ticks %ld\n", missed - 1);
if (ir->polling) {
hrtimer_init(&ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ir->timer.function = cx88_ir_work;
- hrtimer_start(&ir->timer,
- ktime_set(0, ir->polling * 1000000),
+ hrtimer_start(&ir->timer, ir->polling * 1000000,
HRTIMER_MODE_REL);
}
if (ir->sampling) {
pt3_proc_dma(adap);
- delay = ktime_set(0, PT3_FETCH_DELAY * NSEC_PER_MSEC);
+ delay = PT3_FETCH_DELAY * NSEC_PER_MSEC;
set_current_state(TASK_UNINTERRUPTIBLE);
freezable_schedule_hrtimeout_range(&delay,
PT3_FETCH_DELAY_DELTA * NSEC_PER_MSEC,
ovf = 0x100000000ULL * 16;
do_div(ovf, card->pdat->freq ?: 16);
- card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf);
+ card->ts_overflow = ktime_add_us(0, ovf);
}
ktime_t softing_raw2ktime(struct softing *card, u32 raw)
open_candev(netdev);
if (dev != netdev) {
/* notify other busses on the restart */
- softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
+ softing_netdev_rx(netdev, &msg, 0);
++priv->can.can_stats.restarts;
}
netif_wake_queue(netdev);
/* a dead bus has no overflows */
continue;
++netdev->stats.rx_over_errors;
- softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
+ softing_netdev_rx(netdev, &msg, 0);
}
/* prepare for other use */
memset(&msg, 0, sizeof(msg));
if (!netif_running(priv->net_dev))
return HRTIMER_NORESTART;
- hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
+ hrtimer_forward_now(timer, polling_frequency);
return HRTIMER_RESTART;
}
hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
priv->hrtimer.function = ec_bhf_timer_fun;
- hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
- HRTIMER_MODE_REL);
+ hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL);
return 0;
if (!port_pcpu->timer_scheduled) {
port_pcpu->timer_scheduled = true;
- interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+ interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
hrtimer_start(&port_pcpu->tx_done_timer, interval,
HRTIMER_MODE_REL_PINNED);
}
&info->mpipe[instance].tx_wake[priv->echannel];
hrtimer_start(&tx_wake->timer,
- ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
+ TX_TIMER_DELAY_USEC * 1000UL,
HRTIMER_MODE_REL_PINNED);
}
if (!info->egress_timer_scheduled) {
hrtimer_start(&info->egress_timer,
- ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL),
+ EGRESS_TIMER_DELAY_USEC * 1000UL,
HRTIMER_MODE_REL_PINNED);
info->egress_timer_scheduled = true;
}
case STATE_TRX_OFF:
switch (ctx->to_state) {
case STATE_RX_AACK_ON:
- tim = ktime_set(0, c->t_off_to_aack * NSEC_PER_USEC);
+ tim = c->t_off_to_aack * NSEC_PER_USEC;
/* state change from TRX_OFF to RX_AACK_ON to do a
* calibration, we need to reset the timeout for the
* next one.
goto change;
case STATE_TX_ARET_ON:
case STATE_TX_ON:
- tim = ktime_set(0, c->t_off_to_tx_on * NSEC_PER_USEC);
+ tim = c->t_off_to_tx_on * NSEC_PER_USEC;
/* state change from TRX_OFF to TX_ON or ARET_ON to do
* a calibration, we need to reset the timeout for the
* next one.
* to TX_ON or TRX_OFF.
*/
if (!force) {
- tim = ktime_set(0, (c->t_frame + c->t_p_ack) *
- NSEC_PER_USEC);
+ tim = (c->t_frame + c->t_p_ack) * NSEC_PER_USEC;
goto change;
}
break;
case STATE_P_ON:
switch (ctx->to_state) {
case STATE_TRX_OFF:
- tim = ktime_set(0, c->t_reset_to_off * NSEC_PER_USEC);
+ tim = c->t_reset_to_off * NSEC_PER_USEC;
goto change;
default:
break;
/* start timer, if not already started */
if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop)))
hrtimer_start(&ctx->tx_timer,
- ktime_set(0, ctx->timer_interval),
+ ctx->timer_interval,
HRTIMER_MODE_REL);
}
if (rt2800usb_txstatus_pending(rt2x00dev)) {
/* Read register after 1 ms */
hrtimer_start(&rt2x00dev->txstatus_timer,
- ktime_set(0, TXSTATUS_READ_INTERVAL),
+ TXSTATUS_READ_INTERVAL,
HRTIMER_MODE_REL);
return false;
}
/* Read TX_STA_FIFO register after 2 ms */
hrtimer_start(&rt2x00dev->txstatus_timer,
- ktime_set(0, 2*TXSTATUS_READ_INTERVAL),
+ 2 * TXSTATUS_READ_INTERVAL,
HRTIMER_MODE_REL);
}
static ktime_t fixup_debug_start(struct pci_dev *dev,
void (*fn)(struct pci_dev *dev))
{
- ktime_t calltime = ktime_set(0, 0);
+ ktime_t calltime = 0;
dev_dbg(&dev->dev, "calling %pF\n", fn);
if (initcall_debug) {
if (err)
goto err_free_keymap;
- last_pressed = ktime_set(0, 0);
+ last_pressed = 0;
return 0;
static void ltc2952_poweroff_default(struct ltc2952_poweroff *data)
{
- data->wde_interval = ktime_set(0, 300L*1E6L);
+ data->wde_interval = 300L * 1E6L;
data->trigger_delay = ktime_set(2, 500L*1E6L);
hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
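Note that ktime_set() itself is not obsolete: it remains the natural constructor whenever the seconds argument is non-zero, which is why trigger_delay above keeps it while wde_interval loses it. Side by side, using the values from the ltc2952 hunk:

data->wde_interval = 300L * 1E6L;			/* 0.3 s: bare nanosecond scalar */
data->trigger_delay = ktime_set(2, 500L * 1E6L);	/* 2.5 s: seconds part non-zero */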
rtc_timer_remove(rtc, &rtc->aie_timer);
rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
- rtc->aie_timer.period = ktime_set(0, 0);
+ rtc->aie_timer.period = 0;
if (alarm->enabled)
err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
return err;
rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
- rtc->aie_timer.period = ktime_set(0, 0);
+ rtc->aie_timer.period = 0;
/* Alarm has to be enabled & in the future for us to enqueue it */
if (alarm->enabled && (rtc_tm_to_ktime(now) <
int count;
rtc = container_of(timer, struct rtc_device, pie_timer);
- period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+ period = NSEC_PER_SEC / rtc->irq_freq;
count = hrtimer_forward_now(timer, period);
rtc_handle_legacy_irq(rtc, count, RTC_PF);
return -1;
if (enabled) {
- ktime_t period = ktime_set(0, NSEC_PER_SEC / rtc->irq_freq);
+ ktime_t period = NSEC_PER_SEC / rtc->irq_freq;
hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
}
case AP_WAIT_TIMEOUT:
spin_lock_bh(&ap_poll_timer_lock);
if (!hrtimer_is_queued(&ap_poll_timer)) {
- hr_time = ktime_set(0, poll_timeout);
+ hr_time = poll_timeout;
hrtimer_forward_now(&ap_poll_timer, hr_time);
hrtimer_restart(&ap_poll_timer);
}
time > 120000000000ULL)
return -EINVAL;
poll_timeout = time;
- hr_time = ktime_set(0, poll_timeout);
+ hr_time = poll_timeout;
spin_lock_bh(&ap_poll_timer_lock);
hrtimer_cancel(&ap_poll_timer);
if (!vscsi->rsp_q_timer.started) {
if (vscsi->rsp_q_timer.timer_pops <
MAX_TIMER_POPS) {
- kt = ktime_set(0, WAIT_NANO_SECONDS);
+ kt = WAIT_NANO_SECONDS;
} else {
/*
* slide the timeslice if the maximum
jiffies_to_timespec(delta_jiff, &ts);
kt = ktime_set(ts.tv_sec, ts.tv_nsec);
} else
- kt = ktime_set(0, sdebug_ndelay);
+ kt = sdebug_ndelay;
if (NULL == sd_dp) {
sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
if (NULL == sd_dp)
if (!hba->outstanding_reqs && scaling->is_busy_started) {
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
scaling->busy_start_t));
- scaling->busy_start_t = ktime_set(0, 0);
+ scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
}
scaling->busy_start_t = ktime_get();
scaling->is_busy_started = true;
} else {
- scaling->busy_start_t = ktime_set(0, 0);
+ scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
/* Delay the timer. */
- hrtimer_start(&ncm->task_timer,
- ktime_set(0, TX_TIMEOUT_NSECS),
+ hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
HRTIMER_MODE_REL);
/* Add the datagram position entries */
ktime_t *timeout = &ehci->hr_timeouts[event];
if (resched)
- *timeout = ktime_add(ktime_get(),
- ktime_set(0, event_delays_ns[event]));
+ *timeout = ktime_add(ktime_get(), event_delays_ns[event]);
ehci->enabled_hrtimer_events |= (1 << event);
/* Track only the lowest-numbered pending event */
ktime_t *timeout = &fotg210->hr_timeouts[event];
if (resched)
- *timeout = ktime_add(ktime_get(),
- ktime_set(0, event_delays_ns[event]));
+ *timeout = ktime_add(ktime_get(), event_delays_ns[event]);
fotg210->enabled_hrtimer_events |= (1 << event);
/* Track only the lowest-numbered pending event */
if (!list_empty(&controller->early_tx_list) &&
!hrtimer_is_queued(&controller->early_tx)) {
ret = HRTIMER_RESTART;
- hrtimer_forward_now(&controller->early_tx,
- ktime_set(0, 20 * NSEC_PER_USEC));
+ hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC);
}
spin_unlock_irqrestore(&musb->lock, flags);
unsigned long usecs = cppi41_channel->total_len / 10;
hrtimer_start_range_ns(&controller->early_tx,
- ktime_set(0, usecs * NSEC_PER_USEC),
- 20 * NSEC_PER_USEC,
- HRTIMER_MODE_REL);
+ usecs * NSEC_PER_USEC,
+ 20 * NSEC_PER_USEC,
+ HRTIMER_MODE_REL);
}
out:
void dlm_scan_waiters(struct dlm_ls *ls)
{
struct dlm_lkb *lkb;
- ktime_t zero = ktime_set(0, 0);
+ ktime_t zero = 0;
s64 us;
s64 debug_maxus = 0;
u32 debug_scanned = 0;
gl->gl_target = LM_ST_UNLOCKED;
gl->gl_demote_state = LM_ST_EXCLUSIVE;
gl->gl_ops = glops;
- gl->gl_dstamp = ktime_set(0, 0);
+ gl->gl_dstamp = 0;
preempt_disable();
/* We use the global stats to estimate the initial per-glock stats */
gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
else
remaining = hrtimer_expires_remaining_adjusted(&ctx->t.tmr);
- return remaining < 0 ? ktime_set(0, 0): remaining;
+ return remaining < 0 ? 0 : remaining;
}
static int timerfd_setup(struct timerfd_ctx *ctx, int flags,
static inline ktime_t net_invalid_timestamp(void)
{
- return ktime_set(0, 0);
+ return 0;
}
struct sk_buff *skb_clone_sk(struct sk_buff *skb);
* yield - it could be a while.
*/
if (unlikely(queued)) {
- ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
+ ktime_t to = NSEC_PER_SEC / HZ;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_hrtimeout(&to, HRTIMER_MODE_REL);
min = freezer_delta;
expires = freezer_expires;
type = freezer_alarmtype;
- freezer_delta = ktime_set(0, 0);
+ freezer_delta = 0;
spin_unlock_irqrestore(&freezer_delta_lock, flags);
rtc = alarmtimer_get_rtcdev();
now = ktime_add(now, min);
/* Set alarm, if in the past reject suspend briefly to handle */
- ret = rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0));
+ ret = rtc_timer_start(rtc, &rtctimer, now, 0);
if (ret < 0)
__pm_wakeup_event(ws, MSEC_PER_SEC);
return ret;
*/
timer->is_rel = mode & HRTIMER_MODE_REL;
if (timer->is_rel)
- tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
+ tim = ktime_add_safe(tim, hrtimer_resolution);
#endif
return tim;
}
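Since the hunk above adds a caller-supplied relative timeout to the timer resolution, it relies on ktime_add_safe() saturating instead of wrapping on overflow. A paraphrased sketch of that helper from kernel/time/hrtimer.c:

ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = lhs + rhs;	/* plain s64 addition now */

	/* saturate to the largest timeout representable in a timespec
	 * rather than wrapping on signed overflow */
	if (res < 0 || res < lhs || res < rhs)
		res = ktime_set(KTIME_SEC_MAX, 0);
	return res;
}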
*/
#ifdef CONFIG_HIGH_RES_TIMERS
{
- ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);
+ ktime_t kj = NSEC_PER_SEC / HZ;
if (timr->it.real.interval < kj)
now = ktime_add(now, kj);
struct clock_event_device *newdev, int cpu,
const struct cpumask *cpumask)
{
- ktime_t next_event;
void (*handler)(struct clock_event_device *) = NULL;
+ ktime_t next_event = 0;
/*
* First device setup ?
else
tick_do_timer_cpu = TICK_DO_TIMER_NONE;
tick_next_period = ktime_get();
- tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
+ tick_period = NSEC_PER_SEC / HZ;
}
/*
return HRTIMER_RESTART;
} else {
/* rearm throttle handling */
- op->kt_lastmsg = ktime_set(0, 0);
+ op->kt_lastmsg = 0;
return HRTIMER_NORESTART;
}
}
* In any case cancel the throttle timer, flush
* potentially blocked msgs and reset throttle handling
*/
- op->kt_lastmsg = ktime_set(0, 0);
+ op->kt_lastmsg = 0;
hrtimer_cancel(&op->thrtimer);
bcm_rx_thr_flush(op, 1);
}
if (skb->len > max_sifs_size)
hrtimer_start(&local->ifs_timer,
- ktime_set(0, hw->phy->lifs_period * NSEC_PER_USEC),
+ hw->phy->lifs_period * NSEC_PER_USEC,
HRTIMER_MODE_REL);
else
hrtimer_start(&local->ifs_timer,
- ktime_set(0, hw->phy->sifs_period * NSEC_PER_USEC),
+ hw->phy->sifs_period * NSEC_PER_USEC,
HRTIMER_MODE_REL);
} else {
ieee802154_wake_queue(hw);
if (delay) {
ktime_t time;
- time = ktime_set(0, 0);
+ time = 0;
time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
}
*/
peer->rto = msecs_to_jiffies(net->sctp.rto_initial);
- peer->last_time_heard = ktime_set(0, 0);
+ peer->last_time_heard = 0;
peer->last_time_ecne_reduced = jiffies;
peer->param_flags = SPP_HB_DISABLE |
if (x->curlft.bytes >= x->lft.hard_byte_limit ||
x->curlft.packets >= x->lft.hard_packet_limit) {
x->km.state = XFRM_STATE_EXPIRED;
- tasklet_hrtimer_start(&x->mtimer, ktime_set(0, 0), HRTIMER_MODE_REL);
+ tasklet_hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL);
return -EINVAL;
}
atomic_set(&chip->timer_active, 1);
chip->thalf = 0;
- hrtimer_start(&pcsp_chip.timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+ hrtimer_start(&pcsp_chip.timer, 0, HRTIMER_MODE_REL);
return 0;
}
snd_rawmidi_transmit_ack(substream, port->consume_bytes);
else if (!rcode_is_permanent_error(rcode))
/* To start next transaction immediately for recovery. */
- port->next_ktime = ktime_set(0, 0);
+ port->next_ktime = 0;
else
/* Don't continue processing. */
port->error = true;
if (port->consume_bytes <= 0) {
/* Do it in next chance, immediately. */
if (port->consume_bytes == 0) {
- port->next_ktime = ktime_set(0, 0);
+ port->next_ktime = 0;
schedule_work(&port->work);
} else {
/* Fatal error. */
port->addr = addr;
port->fill = fill;
port->idling = true;
- port->next_ktime = ktime_set(0, 0);
+ port->next_ktime = 0;
port->error = false;
INIT_WORK(&port->work, midi_port_work);
static void dac_audio_set_rate(struct snd_sh_dac *chip)
{
- chip->wakeups_per_second = ktime_set(0, 1000000000 / chip->rate);
+ chip->wakeups_per_second = 1000000000 / chip->rate;
}
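A closing note on the representation this series leans on: ktime_t values now compare and assign like ordinary integers, which is what lets the hunks above write remaining < 0 ? 0 : remaining (timerfd) or timr->it.real.interval < kj (posix-timers) directly. A minimal sketch, where deadline stands for any hypothetical ktime_t expiry:

ktime_t remaining = ktime_sub(deadline, ktime_get());

if (remaining < 0)	/* no ktime_to_ns() or .tv64 accessor needed */
	remaining = 0;	/* previously: remaining = ktime_set(0, 0) */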