/*
 *	SUCS NET3:
 *
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and NetROM
 *	layers all have identical poll code and mostly identical recvmsg()
 *	code. So we share it here. The poll was shared before but buried in
 *	udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@redhat.com>. (datagram_poll() from old udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy() understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET. IPX can no
 *					longer use the SO_TYPE hack but AX.25 now
 *					works right, and SPX is feasible.
 *		Alan Cox	:	Fixed write poll of non-IP protocol crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff	:	Unconnected accept() fix.
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>

#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/checksum.h>

/*
 *	Is a socket 'connection oriented' ?
 */

static inline int connection_based(struct sock *sk)
{
        return (sk->type == SOCK_SEQPACKET || sk->type == SOCK_STREAM);
}

/*
 *	Wait for a packet..
 */

static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
        int error;

        DECLARE_WAITQUEUE(wait, current);

        __set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue_exclusive(sk->sleep, &wait);

        /* Socket errors? */
        error = sock_error(sk);
        if (error)
                goto out_err;

        if (!skb_queue_empty(&sk->receive_queue))
                goto ready;

        /* Socket shut down? */
        if (sk->shutdown & RCV_SHUTDOWN)
                goto out_noerr;

        /* Sequenced packets can come disconnected. If so we report the problem. */
        error = -ENOTCONN;
        if (connection_based(sk) &&
            !(sk->state == TCP_ESTABLISHED || sk->state == TCP_LISTEN))
                goto out_err;

        /* handle signals */
        if (signal_pending(current))
                goto interrupted;

        *timeo_p = schedule_timeout(*timeo_p);

ready:
        current->state = TASK_RUNNING;
        remove_wait_queue(sk->sleep, &wait);
        return 0;

interrupted:
        error = sock_intr_errno(*timeo_p);
out_err:
        *err = error;
out:
        current->state = TASK_RUNNING;
        remove_wait_queue(sk->sleep, &wait);
        return error;
out_noerr:
        *err = 0;
        error = 1;
        goto out;
}

/*
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX, AX.25 and AppleTalk layers. It also finally
 *	fixes the long-standing peek and read race for datagram sockets.
 *	If you alter this routine remember it must be re-entrant.
 *
 *	This function does not lock the socket. The caller owns a reference
 *	on the returned skb and must release it when done (usually by
 *	calling skb_free_datagram()).
 *
 *	* It does not lock the socket any more. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	* 8) Great win.)
 *	*			--ANK (980729)
 *
 *	The order of the tests when we find no data waiting is specified
 *	quite explicitly by POSIX 1003.1g; don't change it without having
 *	the standard around please.
 */

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err)
{
        int error;
        struct sk_buff *skb;
        long timeo;

        /* Caller is allowed not to check sk->err before skb_recv_datagram() */
        error = sock_error(sk);
        if (error)
                goto no_packet;

        timeo = sock_rcvtimeo(sk, noblock);

        do {
                /* Again only user-level code calls this function, so nothing
                   interrupt-level will suddenly eat the receive_queue.

                   Look at current nfs client by the way...
                   However, this function was correct in any case. 8)
                 */
                if (flags & MSG_PEEK) {
                        unsigned long cpu_flags;

                        spin_lock_irqsave(&sk->receive_queue.lock, cpu_flags);
                        skb = skb_peek(&sk->receive_queue);
                        if (skb != NULL)
                                atomic_inc(&skb->users);
                        spin_unlock_irqrestore(&sk->receive_queue.lock, cpu_flags);
                } else
                        skb = skb_dequeue(&sk->receive_queue);

                if (skb)
                        return skb;

                /* User doesn't want to wait */
                error = -EAGAIN;
                if (!timeo)
                        goto no_packet;

        } while (wait_for_packet(sk, err, &timeo) == 0);

        return NULL;

no_packet:
        *err = error;
        return NULL;
}

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
        kfree_skb(skb);
}
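
/*
 * Illustrative sketch (not part of the original file): roughly how a
 * datagram protocol's recvmsg() path is expected to use the two helpers
 * above. The function name, signature and zero header offset are
 * hypothetical; error handling is trimmed to the essentials.
 */
#if 0
static int example_recvmsg(struct sock *sk, struct msghdr *msg, int len,
                           int noblock, int flags)
{
        struct sk_buff *skb;
        int copied, err;

        /* Dequeue (or peek, if MSG_PEEK is set) one datagram. */
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (skb == NULL)
                return err;

        /* Truncate to the caller's buffer and flag short reads. */
        copied = skb->len;
        if (copied > len) {
                copied = len;
                msg->msg_flags |= MSG_TRUNC;
        }

        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

        /* Release our reference on the skb in every case. */
        skb_free_datagram(sk, skb);
        return err ? err : copied;
}
#endif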

/*
 *	Copy a datagram to a linear buffer.
 */

int skb_copy_datagram(const struct sk_buff *skb, int offset, char *to, int size)
{
        struct iovec iov = { to, size };

        return skb_copy_datagram_iovec(skb, offset, &iov, size);
}
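
/*
 * Illustrative sketch (hypothetical caller): the destination buffer passed
 * to skb_copy_datagram() travels through the iovec copy path, so it is a
 * user-space pointer, not a kernel buffer. The 4-byte header is made up.
 */
#if 0
static int example_copy_whole(struct sk_buff *skb, char *ubuf)
{
        /* Copy the whole datagram, minus a hypothetical 4-byte header. */
        return skb_copy_datagram(skb, 4, ubuf, skb->len - 4);
}
#endif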

/*
 *	Copy a datagram to an iovec.
 *	Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
                            struct iovec *to, int len)
{
        int i, copy;
        int start = skb->len - skb->data_len;

        /* Copy header. */
        if ((copy = start - offset) > 0) {
                if (copy > len)
                        copy = len;
                if (memcpy_toiovec(to, skb->data + offset, copy))
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
        }

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                BUG_TRAP(start <= offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        int err;
                        u8 *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        struct page *page = frag->page;

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        err = memcpy_toiovec(to, vaddr + frag->page_offset +
                                             offset - start, copy);
                        kunmap(page);
                        if (err)
                                goto fault;
                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                }
                start = end;
        }

        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list;

                for (list = skb_shinfo(skb)->frag_list; list; list = list->next) {
                        int end;

                        BUG_TRAP(start <= offset + len);

                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
                                if (skb_copy_datagram_iovec(list, offset - start, to, copy))
                                        goto fault;
                                if ((len -= copy) == 0)
                                        return 0;
                                offset += copy;
                        }
                        start = end;
                }
        }
        if (len == 0)
                return 0;

fault:
        return -EFAULT;
}
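
/*
 * Why "the iovec is modified": the copy helpers consume the destination
 * iovec in place, advancing iov_base and shrinking iov_len as they fill
 * each element, so a second copy continues where the first one stopped.
 * A rough sketch of such an advancing copy (approximating what
 * memcpy_toiovec() does; not its exact source):
 */
#if 0
static int example_copy_to_iovec(struct iovec *iov, unsigned char *kdata, int len)
{
        while (len > 0) {
                if (iov->iov_len) {
                        int copy = min_t(unsigned int, iov->iov_len, len);

                        if (copy_to_user(iov->iov_base, kdata, copy))
                                return -EFAULT;
                        kdata += copy;
                        len -= copy;
                        /* Consume this element in place ... */
                        iov->iov_len -= copy;
                        iov->iov_base += copy;
                }
                iov++;  /* ... and move on to the next one. */
        }
        return 0;
}
#endif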

int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to,
                               int len, unsigned int *csump)
{
        int i, copy;
        int start = skb->len - skb->data_len;
        int pos = 0;

        /* Copy header. */
        if ((copy = start - offset) > 0) {
                int err = 0;

                if (copy > len)
                        copy = len;
                *csump = csum_and_copy_to_user(skb->data + offset, to, copy,
                                               *csump, &err);
                if (err)
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                to += copy;
                pos = copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                BUG_TRAP(start <= offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        unsigned int csum2;
                        int err = 0;
                        u8 *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        struct page *page = frag->page;

                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
                        csum2 = csum_and_copy_to_user(vaddr + frag->page_offset +
                                                      offset - start, to, copy, 0, &err);
                        kunmap(page);
                        if (err)
                                goto fault;
                        *csump = csum_block_add(*csump, csum2, pos);
                        if (!(len -= copy))
                                return 0;
                        offset += copy;
                        to += copy;
                        pos += copy;
                }
                start = end;
        }

        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list;

                for (list = skb_shinfo(skb)->frag_list; list; list = list->next) {
                        int end;

                        BUG_TRAP(start <= offset + len);

                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                unsigned int csum2 = 0;

                                if (copy > len)
                                        copy = len;
                                if (skb_copy_and_csum_datagram(list, offset - start,
                                                               to, copy, &csum2))
                                        goto fault;
                                *csump = csum_block_add(*csump, csum2, pos);
                                if ((len -= copy) == 0)
                                        return 0;
                                offset += copy;
                                to += copy;
                                pos += copy;
                        }
                        start = end;
                }
        }
        if (len == 0)
                return 0;

fault:
        return -EFAULT;
}
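
/*
 * Why the running "pos" matters above: the Internet checksum is a 16-bit
 * one's-complement sum, so a chunk's partial checksum depends on whether
 * the chunk starts at an even or an odd byte offset in the overall data.
 * csum_block_add() compensates by byte-swapping the partial sum for odd
 * offsets before folding it in; it is essentially the following (a sketch,
 * not the exact per-architecture source):
 */
#if 0
static inline unsigned int example_csum_block_add(unsigned int csum,
                                                  unsigned int csum2, int offset)
{
        if (offset & 1) /* odd offset: swap the bytes of the partial sum */
                csum2 = ((csum2 & 0xFF00FF) << 8) + ((csum2 >> 8) & 0xFF00FF);
        return csum_add(csum, csum2);
}
#endif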

/*
 *	Copy and checksum skb to user iovec. Caller _must_ check that
 *	the skb will fit into this iovec.
 *
 *	Returns: 0	 - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy. Beware, in this case the iovec
 *			   can be modified!
 */

int skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen, struct iovec *iov)
{
        unsigned int csum;
        int chunk = skb->len - hlen;

        /* Skip filled elements. Pretty silly, look at memcpy_toiovec, though 8) */
        while (iov->iov_len == 0)
                iov++;

        if (iov->iov_len < chunk) {
                if ((unsigned short)csum_fold(skb_checksum(skb, 0, chunk + hlen, skb->csum)))
                        goto csum_error;
                if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
                        goto fault;
        } else {
                csum = csum_partial(skb->data, hlen, skb->csum);
                if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base, chunk, &csum))
                        goto fault;
                if ((unsigned short)csum_fold(csum))
                        goto csum_error;
                iov->iov_len -= chunk;
                iov->iov_base += chunk;
        }
        return 0;

csum_error:
        return -EINVAL;

fault:
        return -EFAULT;
}
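
/*
 * Illustrative sketch (hypothetical, modeled on the kind of dispatch found
 * in udp.c): a protocol picks between the plain and the copy-and-checksum
 * helper depending on whether the packet was already verified. The
 * function name and parameters are placeholders.
 */
#if 0
static int example_copy_checked(struct sk_buff *skb, struct msghdr *msg,
                                int hlen, int copied)
{
        /* Already verified (e.g. by hardware): plain copy is enough. */
        if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                return skb_copy_datagram_iovec(skb, hlen, msg->msg_iov, copied);

        /* Verify while copying; -EINVAL here signals a bad checksum. */
        return skb_copy_and_csum_datagram_iovec(skb, hlen, msg->msg_iov);
}
#endif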

/*
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets provided the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you _don't_ use this routine for your protocol,
 *	and you use a different write policy from sock_writeable(),
 *	then please supply your own write_space callback.
 */

unsigned int datagram_poll(struct file *file, struct socket *sock, poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask;

        poll_wait(file, sk->sleep, wait);
        mask = 0;

        /* exceptional events? */
        if (sk->err || !skb_queue_empty(&sk->error_queue))
                mask |= POLLERR;
        if (sk->shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;

        /* readable? */
        if (!skb_queue_empty(&sk->receive_queue) || (sk->shutdown & RCV_SHUTDOWN))
                mask |= POLLIN | POLLRDNORM;

        /* Connection-based need to check for termination and startup */
        if (connection_based(sk)) {
                if (sk->state == TCP_CLOSE)
                        mask |= POLLHUP;
                /* connection hasn't started yet? */
                if (sk->state == TCP_SYN_SENT)
                        return mask;
        }

        /* writable? */
        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);

        return mask;
}
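
/*
 * Illustrative sketch (hypothetical): a protocol adopts the shared poll
 * simply by pointing its proto_ops entry at datagram_poll(), as the
 * datagram families do. PF_EXAMPLE and every field except poll are
 * placeholders.
 */
#if 0
static struct proto_ops example_dgram_ops = {
        family:         PF_EXAMPLE,       /* hypothetical family */
        poll:           datagram_poll,    /* the shared implementation above */
        recvmsg:        example_recvmsg,  /* hypothetical recvmsg shown earlier */
        /* ... remaining ops ... */
};
#endif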