@@ ... @@
int error;
struct sctp_transport *transport = (struct sctp_transport *) peer;
struct sctp_association *asoc = transport->asoc;
+ struct sock *sk = asoc->base.sk;
/* Check whether a task is in the sock. */
- sctp_bh_lock_sock(asoc->base.sk);
- if (sock_owned_by_user(asoc->base.sk)) {
+ sctp_bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);
/* Try again later. */
@@ ... @@
transport, GFP_ATOMIC);
if (error)
- asoc->base.sk->sk_err = -error;
+ sk->sk_err = -error;
out_unlock:
- sctp_bh_unlock_sock(asoc->base.sk);
+ sctp_bh_unlock_sock(sk);
sctp_transport_put(transport);
}
@@ ... @@
static void sctp_generate_timeout_event(struct sctp_association *asoc,
sctp_event_timeout_t timeout_type)
{
+ struct sock *sk = asoc->base.sk;
int error = 0;
- sctp_bh_lock_sock(asoc->base.sk);
- if (sock_owned_by_user(asoc->base.sk)) {
+ sctp_bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n",
__func__,
timeout_type);
@@ ... @@
(void *)timeout_type, GFP_ATOMIC);
if (error)
- asoc->base.sk->sk_err = -error;
+ sk->sk_err = -error;
out_unlock:
- sctp_bh_unlock_sock(asoc->base.sk);
+ sctp_bh_unlock_sock(sk);
sctp_association_put(asoc);
}
@@ ... @@
int error = 0;
struct sctp_transport *transport = (struct sctp_transport *) data;
struct sctp_association *asoc = transport->asoc;
+ struct sock *sk = asoc->base.sk;
- sctp_bh_lock_sock(asoc->base.sk);
- if (sock_owned_by_user(asoc->base.sk)) {
+ sctp_bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);
/* Try again later. */
@@ ... @@
transport, GFP_ATOMIC);
if (error)
- asoc->base.sk->sk_err = -error;
+ sk->sk_err = -error;
out_unlock:
- sctp_bh_unlock_sock(asoc->base.sk);
+ sctp_bh_unlock_sock(sk);
sctp_transport_put(transport);
}
@@ ... @@
{
struct sctp_transport *transport = (struct sctp_transport *) data;
struct sctp_association *asoc = transport->asoc;
+ struct sock *sk = asoc->base.sk;
- sctp_bh_lock_sock(asoc->base.sk);
- if (sock_owned_by_user(asoc->base.sk)) {
+ sctp_bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);
/* Try again later. */
@@ ... @@
asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
out_unlock:
- sctp_bh_unlock_sock(asoc->base.sk);
+ sctp_bh_unlock_sock(sk);
sctp_association_put(asoc);
}
@@ ... @@
* outstanding data and rely on the retransmission limit being reached
* to shut down the association.
*/
- if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
+ if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
t->asoc->overall_error_count = 0;
/* Clear the hb_sent flag to signal that we had a good
* acknowledgement.
*/
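The switch from != to < in the hunk above relies on sctp_state_t being an ordered enum in which every shutdown-phase state sorts at or after SCTP_STATE_SHUTDOWN_PENDING, so the comparison stops resetting overall_error_count in all shutdown states rather than only SHUTDOWN_PENDING; a peer that keeps acknowledging heartbeats but never completes the shutdown can then still exhaust the retransmission limit. The assumed ordering, abbreviated from the kernel's enum in include/net/sctp/constants.h:

typedef enum {
	SCTP_STATE_CLOSED		= 0,
	SCTP_STATE_COOKIE_WAIT		= 1,
	SCTP_STATE_COOKIE_ECHOED	= 2,
	SCTP_STATE_ESTABLISHED		= 3,
	SCTP_STATE_SHUTDOWN_PENDING	= 4,	/* shutdown phase starts here */
	SCTP_STATE_SHUTDOWN_SENT	= 5,
	SCTP_STATE_SHUTDOWN_RECEIVED	= 6,
	SCTP_STATE_SHUTDOWN_ACK_SENT	= 7,
} sctp_state_t;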
@@ ... @@
* This way the whole message is queued up and bundling is
* encouraged for small fragments.
*/
-static int sctp_cmd_send_msg(struct sctp_association *asoc,
- struct sctp_datamsg *msg)
+static void sctp_cmd_send_msg(struct sctp_association *asoc,
+ struct sctp_datamsg *msg)
{
struct sctp_chunk *chunk;
- int error = 0;
- list_for_each_entry(chunk, &msg->chunks, frag_list) {
- error = sctp_outq_tail(&asoc->outqueue, chunk);
- if (error)
- break;
- }
-
- return error;
+ list_for_each_entry(chunk, &msg->chunks, frag_list)
+ sctp_outq_tail(&asoc->outqueue, chunk);
}
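Making sctp_cmd_send_msg() void is a behavioral fix as much as a cleanup: the old loop stopped at the first sctp_outq_tail() failure and left the message partially queued, while the new one queues every fragment, so the whole message reaches the outqueue together, as the bundling comment above intends. The SCTP_CMD_SEND_MSG caller in the final hunk below is updated to match and no longer captures a return value.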
@@ ... @@
case SCTP_CMD_PROCESS_CTSN:
/* Dummy up a SACK for processing. */
sackh.cum_tsn_ack = cmd->obj.be32;
- sackh.a_rwnd = asoc->peer.rwnd +
- asoc->outqueue.outstanding_bytes;
+ sackh.a_rwnd = htonl(asoc->peer.rwnd +
+ asoc->outqueue.outstanding_bytes);
sackh.num_gap_ack_blocks = 0;
sackh.num_dup_tsns = 0;
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
SCTP_SACKH(&sackh));
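The htonl() matters because struct sctp_sackhdr declares a_rwnd as __be32: the dummied-up SACK must advertise the window in network byte order, and the raw host-order sum was only accidentally correct on big-endian machines (cum_tsn_ack needs no conversion since cmd->obj.be32 is already big-endian). A self-contained userspace illustration of the difference, not kernel code; the window value is arbitrary:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t rwnd = 0x00012345;	/* host-order peer.rwnd + outstanding_bytes */
	uint32_t wire = htonl(rwnd);	/* what a __be32 header field must hold */

	/* On little-endian hardware the two differ, so storing the raw
	 * sum would advertise a wildly wrong receive window. */
	printf("host order: 0x%08x\n", rwnd);
	printf("wire order: 0x%08x\n", wire);
	return 0;
}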
@@ ... @@
sctp_outq_cork(&asoc->outqueue);
local_cork = 1;
}
- error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
+ sctp_cmd_send_msg(asoc, cmd->obj.msg);
break;
case SCTP_CMD_SEND_NEXT_ASCONF:
sctp_cmd_send_asconf(asoc);