/*
 * Using udelay() for intervals greater than a few milliseconds can
 * risk overflow for high loops_per_jiffy (high bogomips) machines. The
 * mdelay() provides a wrapper to prevent this.  For delays greater
 * than MAX_UDELAY_MS milliseconds, the wrapper is used.  Architecture
 * specific values can be defined in asm-???/delay.h as an override.
 * The 2nd mdelay() definition ensures GCC will optimize away the
 * while loop for the common cases where n <= MAX_UDELAY_MS -- Paul G.
 */
#ifndef MAX_UDELAY_MS
#define MAX_UDELAY_MS	5
#endif

#ifndef mdelay
/*
 * mdelay(n) - busy-wait for n milliseconds.
 *
 * When n is a compile-time constant no larger than MAX_UDELAY_MS, a
 * single udelay() call is emitted; otherwise loop one millisecond at a
 * time so no individual udelay() argument can overflow.
 */
#define mdelay(n) (\
	(__builtin_constant_p(n) && (n)<=MAX_UDELAY_MS) ? udelay((n)*1000) : \
	({unsigned long __ms=(n); while (__ms--) udelay(1000);}))
#endif

#ifndef ndelay
/* ndelay(x) - delay for x nanoseconds, rounded up to whole microseconds. */
static inline void ndelay(unsigned long x)
{
	udelay(DIV_ROUND_UP(x, 1000));
}
/* Self-referential define lets "#ifndef ndelay" detect this fallback. */
#define ndelay(x) ndelay(x)
#endif
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	/*
	 * Add one jiffy so we sleep for at least @msecs even if the first
	 * tick is about to fire; loop until the whole timeout has elapsed
	 * (schedule_timeout() returns the remaining time on early wakeup).
	 */
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}
EXPORT_SYMBOL(msleep);

/*
 * NOTE(review): a second, identical definition of
 * schedule_timeout_uninterruptible() appeared here; it is already
 * defined (and exported) once below, so the duplicate -- which would be
 * a redefinition error -- has been removed.
 */

/* Timer callback run when the timeout expires: wake the sleeping task. */
static void process_timeout(struct timer_list *t)
{struct process_timer *timeout = from_timer(timeout, t, timer);wake_up_process(timeout->task);
}/*** schedule_timeout - sleep until timeout* @timeout: timeout value in jiffies** Make the current task sleep until @timeout jiffies have* elapsed. The routine will return immediately unless* the current task state has been set (see set_current_state()).** You can set the task state as follows -** %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to* pass before the routine returns unless the current task is explicitly* woken up, (e.g. by wake_up_process())".** %TASK_INTERRUPTIBLE - the routine may return early if a signal is* delivered to the current task or the current task is explicitly woken* up.** The current task state is guaranteed to be TASK_RUNNING when this* routine returns.** Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule* the CPU away without a bound on the timeout. In this case the return* value will be %MAX_SCHEDULE_TIMEOUT.** Returns 0 when the timer has expired otherwise the remaining time in* jiffies will be returned. In all cases the return value is guaranteed* to be non-negative.*/
signed long __sched schedule_timeout(signed long timeout)
{
	struct process_timer timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative value
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it want with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happens anyway). You just have the printk()
		 * that will tell you if something is gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	/* Arm an on-stack timer that wakes us, then yield the CPU. */
	timer.task = current;
	timer_setup_on_stack(&timer.timer, process_timeout, 0);
	__mod_timer(&timer.timer, expire, 0);
	schedule();
	del_singleshot_timer_sync(&timer.timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer.timer);

	timeout = expire - jiffies;

 out:
	/* Clamp so callers never see a negative remaining time. */
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/*
 * Like schedule_timeout_uninterruptible(), except this task will not
 * contribute to load average.
 */
signed long __sched schedule_timeout_idle(signed long timeout)
{
	__set_current_state(TASK_IDLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_idle);
/**
 * msecs_to_jiffies: - convert milliseconds to jiffies
 * @m:	time in milliseconds
 *
 * conversion is done as follows:
 *
 * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
 *
 * - 'too large' values [that would result in larger than
 *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
 *
 * - all other values are converted to jiffies by either multiplying
 *   the input value by a factor or dividing it with a factor and
 *   handling any 32-bit overflows.
 *   for the details see __msecs_to_jiffies()
 *
 * msecs_to_jiffies() checks for the passed in value being a constant
 * via __builtin_constant_p() allowing gcc to eliminate most of the
 * code, __msecs_to_jiffies() is called if the value passed does not
 * allow constant folding and the actual conversion must be done at
 * runtime.
 * the HZ range specific helpers _msecs_to_jiffies() are called both
 * directly here and from __msecs_to_jiffies() in the case where
 * constant folding is not possible.
 */
static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
{
	/* Non-constant input: defer the full conversion to runtime. */
	if (!__builtin_constant_p(m))
		return __msecs_to_jiffies(m);

	/* A "negative" constant means an infinite timeout. */
	if ((int)m < 0)
		return MAX_JIFFY_OFFSET;

	return _msecs_to_jiffies(m);
}

/*
 * Convert jiffies to milliseconds and back.
 *
 * Avoid unnecessary multiplications/divisions in the
 * two most common HZ cases:
 */
unsigned int jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>HZ_TO_MSEC_SHR32;
# elsereturn DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_msecs);