#include <asm/pcic.h>
#include <asm/timer.h>
#include <asm/uaccess.h>
+#include <asm/irq_regs.h>
unsigned int pcic_pin_to_irq(unsigned int pin, char *name);
.index = -1,
};
-int obp_system_intr(void)
-{
- if (boot_flags & BOOTME_DEBUG) {
- printk("OBP: system interrupted\n");
- prom_halt();
- return 1;
- }
- return 0;
-}
-
/*
* Process kernel command line switches that are specific to the
* SPARC or that require special low-level processing.
. = ALIGN(4096);
__init_begin = .;
+ _sinittext = .;
.init.text : {
- _sinittext = .;
*(.init.text)
- _einittext = .;
}
+ _einittext = .;
__init_text_end = .;
.init.data : { *(.init.data) }
. = ALIGN(16);
srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}
-void srmmu_nocache_init(void)
+void __init srmmu_nocache_init(void)
{
unsigned int bitmap_bits;
pgd_t *pgd;
{
}
-int obp_system_intr(void)
-{
- if (boot_flags & BOOTME_DEBUG) {
- printk("OBP: system interrupted\n");
- prom_halt();
- return 1;
- }
- return 0;
-}
-
/*
* Process kernel command line switches that are specific to the
* SPARC or that require special low-level processing.
}
if (old_vector >= 0) {
int old_cpu;
- for_each_cpu_mask(old_cpu, domain)
+ for_each_cpu_mask(old_cpu, irq_domain[irq])
per_cpu(vector_irq, old_cpu)[old_vector] = -1;
}
for_each_cpu_mask(new_cpu, domain)
return 0;
}
-static struct console sunzilog_console = {
+static struct console sunzilog_console_ops = {
.name = "ttyS",
.write = sunzilog_console_write,
.device = uart_console_device,
if (i == NUM_CHANNELS)
return NULL;
- sunzilog_console.index = i;
+ sunzilog_console_ops.index = i;
sunzilog_port_table[i].flags |= SUNZILOG_FLAG_IS_CONS;
- return &sunzilog_console;
+ return &sunzilog_console_ops;
}
#else
* Check permission when a flow selects a xfrm_policy for processing
* XFRMs on a packet. The hook is called when selecting either a
* per-socket policy or a generic xfrm policy.
- * Return 0 if permission is granted.
+ * Return 0 if permission is granted, -ESRCH otherwise, or -errno
+ * on other errors.
* @xfrm_state_pol_flow_match:
* @x contains the state to match.
* @xp contains the policy to check for a match.
* @xfrm_flow_state_match:
* @fl contains the flow key to match.
* @xfrm points to the xfrm_state to match.
+ * @xp points to the xfrm_policy to match.
* Return 1 if there is a match.
* @xfrm_decode_session:
* @skb points to skb to decode.
int (*xfrm_policy_lookup)(struct xfrm_policy *xp, u32 fl_secid, u8 dir);
int (*xfrm_state_pol_flow_match)(struct xfrm_state *x,
struct xfrm_policy *xp, struct flowi *fl);
- int (*xfrm_flow_state_match)(struct flowi *fl, struct xfrm_state *xfrm);
+ int (*xfrm_flow_state_match)(struct flowi *fl, struct xfrm_state *xfrm,
+ struct xfrm_policy *xp);
int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall);
#endif /* CONFIG_SECURITY_NETWORK_XFRM */
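For modules implementing these hooks, the widened xfrm_flow_state_match signature lets the match decision consider the policy as well as the state. A minimal sketch, assuming a hypothetical module "foo" and a hypothetical label-comparison helper foo_labels_compatible(); the SELinux implementation later in this series follows the same shape:

static int foo_xfrm_flow_state_match(struct flowi *fl,
				     struct xfrm_state *xfrm,
				     struct xfrm_policy *xp)
{
	/* An unlabeled policy should only match an unlabeled state. */
	if (!xp->security)
		return !xfrm->security;
	/* A labeled policy never matches an unlabeled state. */
	if (!xfrm->security)
		return 0;
	/* Otherwise defer to the module's label check (hypothetical). */
	return foo_labels_compatible(fl->secid, xfrm->security->ctx_sid);
}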
return security_ops->xfrm_policy_alloc_security(xp, sec_ctx, NULL);
}
-static inline int security_xfrm_sock_policy_alloc(struct xfrm_policy *xp, struct sock *sk)
-{
- return security_ops->xfrm_policy_alloc_security(xp, NULL, sk);
-}
-
static inline int security_xfrm_policy_clone(struct xfrm_policy *old, struct xfrm_policy *new)
{
return security_ops->xfrm_policy_clone_security(old, new);
return security_ops->xfrm_state_pol_flow_match(x, xp, fl);
}
-static inline int security_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm)
+static inline int security_xfrm_flow_state_match(struct flowi *fl,
+ struct xfrm_state *xfrm, struct xfrm_policy *xp)
{
- return security_ops->xfrm_flow_state_match(fl, xfrm);
+ return security_ops->xfrm_flow_state_match(fl, xfrm, xp);
}
static inline int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
return 0;
}
-static inline int security_xfrm_sock_policy_alloc(struct xfrm_policy *xp, struct sock *sk)
-{
- return 0;
-}
-
static inline int security_xfrm_policy_clone(struct xfrm_policy *old, struct xfrm_policy *new)
{
return 0;
}
static inline int security_xfrm_flow_state_match(struct flowi *fl,
- struct xfrm_state *xfrm)
+ struct xfrm_state *xfrm, struct xfrm_policy *xp)
{
return 1;
}
#define FLOW_DIR_FWD 2
struct sock;
-typedef void (*flow_resolve_t)(struct flowi *key, u16 family, u8 dir,
+typedef int (*flow_resolve_t)(struct flowi *key, u16 family, u8 dir,
void **objp, atomic_t **obj_refp);
extern void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
{
if (atomic_dec_and_test(&tw->tw_refcnt)) {
struct module *owner = tw->tw_prot->owner;
+ twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
printk(KERN_DEBUG "%s timewait_sock %p released\n",
tw->tw_prot->name, tw);
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
+#include <asm/atomic.h>
/*
* NetLabel - A management interface for maintaining network packet label
/* LSM security attributes */
struct netlbl_lsm_cache {
+ atomic_t refcount;
void (*free) (const void *data);
void *data;
};
unsigned char *mls_cat;
size_t mls_cat_len;
- struct netlbl_lsm_cache cache;
+ struct netlbl_lsm_cache *cache;
};
/*
*/
+/**
+ * netlbl_secattr_cache_alloc - Allocate and initialize a secattr cache
+ * @flags: the memory allocation flags
+ *
+ * Description:
+ * Allocate and initialize a netlbl_lsm_cache structure. Returns a pointer
+ * on success, NULL on failure.
+ *
+ */
+static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc(gfp_t flags)
+{
+ struct netlbl_lsm_cache *cache;
+
+ cache = kzalloc(sizeof(*cache), flags);
+ if (cache)
+ atomic_set(&cache->refcount, 1);
+ return cache;
+}
+
+/**
+ * netlbl_secattr_cache_free - Frees a netlbl_lsm_cache struct
+ * @cache: the struct to free
+ *
+ * Description:
+ * Frees @cache. If the reference count reaches zero, the registered
+ * free callback is invoked on the LSM data before the struct is freed.
+ *
+ */
+static inline void netlbl_secattr_cache_free(struct netlbl_lsm_cache *cache)
+{
+ if (!atomic_dec_and_test(&cache->refcount))
+ return;
+
+ if (cache->free)
+ cache->free(cache->data);
+ kfree(cache);
+}
+
/**
* netlbl_secattr_init - Initialize a netlbl_lsm_secattr struct
* @secattr: the struct to initialize
/**
* netlbl_secattr_destroy - Clears a netlbl_lsm_secattr struct
* @secattr: the struct to clear
- * @clear_cache: cache clear flag
*
* Description:
* Destroys the @secattr struct, including freeing all of the internal buffers.
- * If @clear_cache is true then free the cache fields, otherwise leave them
- * intact. The struct must be reset with a call to netlbl_secattr_init()
- * before reuse.
+ * The struct must be reset with a call to netlbl_secattr_init() before reuse.
*
*/
-static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr,
- u32 clear_cache)
+static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr)
{
- if (clear_cache && secattr->cache.data != NULL && secattr->cache.free)
- secattr->cache.free(secattr->cache.data);
+ if (secattr->cache)
+ netlbl_secattr_cache_free(secattr->cache);
kfree(secattr->domain);
kfree(secattr->mls_cat);
}
/**
* netlbl_secattr_free - Frees a netlbl_lsm_secattr struct
* @secattr: the struct to free
- * @clear_cache: cache clear flag
*
* Description:
- * Frees @secattr including all of the internal buffers. If @clear_cache is
- * true then free the cache fields, otherwise leave them intact.
+ * Frees @secattr including all of the internal buffers.
*
*/
-static inline void netlbl_secattr_free(struct netlbl_lsm_secattr *secattr,
- u32 clear_cache)
+static inline void netlbl_secattr_free(struct netlbl_lsm_secattr *secattr)
{
- netlbl_secattr_destroy(secattr, clear_cache);
+ netlbl_secattr_destroy(secattr);
kfree(secattr);
}
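The alloc/free pair above turns the cache blob into a shared, reference-counted object. A sketch of the intended ownership handoff, as the CIPSO map cache later in this series uses it; lsm_free_fn and lsm_data are hypothetical stand-ins for whatever the LSM registers:

static int example_share_cache(struct netlbl_lsm_secattr *secattr,
			       struct cipso_v4_map_cache_entry *entry)
{
	struct netlbl_lsm_cache *cache;

	cache = netlbl_secattr_cache_alloc(GFP_ATOMIC);	/* refcount == 1 */
	if (cache == NULL)
		return -ENOMEM;
	cache->free = lsm_free_fn;		/* hypothetical LSM callback */
	cache->data = lsm_data;			/* hypothetical LSM blob */
	secattr->cache = cache;			/* first owner */

	atomic_inc(&cache->refcount);		/* second owner (map cache) */
	entry->lsm_data = cache;

	netlbl_secattr_cache_free(secattr->cache);  /* 2 -> 1: still live */
	netlbl_secattr_cache_free(entry->lsm_data); /* 1 -> 0: free() + kfree() */
	return 0;
}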
void sctp_write_space(struct sock *sk);
unsigned int sctp_poll(struct file *file, struct socket *sock,
poll_table *wait);
+void sctp_sock_rfree(struct sk_buff *skb);
/*
* sctp/primitive.c
return result;
}
+/* SCTP version of skb_set_owner_r. We need this one because
+ * of the way we have to do receive buffer accounting on bundled
+ * chunks.
+ */
+static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
+{
+ struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+ skb->sk = sk;
+ skb->destructor = sctp_sock_rfree;
+ atomic_add(event->rmem_len, &sk->sk_rmem_alloc);
+}
+
/* Tests if the list has one and only one entry. */
static inline int sctp_list_single_entry(struct list_head *head)
{
__u32 cumtsn;
int msg_flags;
int iif;
+ unsigned int rmem_len;
};
/* Retrieve the skb this event sits inside of. */
unsigned int twsk_obj_size;
int (*twsk_unique)(struct sock *sk,
struct sock *sktw, void *twp);
+ void (*twsk_destructor)(struct sock *sk);
};
static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
return 0;
}
+static inline void twsk_destructor(struct sock *sk)
+{
+ if (sk->sk_prot->twsk_prot->twsk_destructor != NULL)
+ sk->sk_prot->twsk_prot->twsk_destructor(sk);
+}
+
#endif /* _TIMEWAIT_SOCK_H */
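A protocol keeping private per-timewait state would wire the new hook up roughly as follows; "foo" and its types are hypothetical, not part of this patch:

/* Hypothetical cast helper, mirroring inet_twsk(). */
static inline struct foo_timewait_sock *foo_twsk(const struct sock *sk)
{
	return (struct foo_timewait_sock *)sk;
}

static void foo_twsk_destructor(struct sock *sk)
{
	kfree(foo_twsk(sk)->private_state);
}

static struct timewait_sock_ops foo_timewait_sock_ops = {
	.twsk_obj_size	 = sizeof(struct foo_timewait_sock),
	.twsk_destructor = foo_twsk_destructor,
};

With the inet_timewait_sock change above, inet_twsk_put() invokes the destructor exactly once, when the last reference is dropped and just before the memory is released.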
int create, unsigned short family);
extern void xfrm_policy_flush(u8 type);
extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
-extern int xfrm_bundle_ok(struct xfrm_dst *xdst, struct flowi *fl, int family, int strict);
+extern int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst,
+ struct flowi *fl, int family, int strict);
extern void xfrm_init_pmtu(struct dst_entry *dst);
extern wait_queue_head_t km_waitq;
if (i > 0) {
int cmlen = CMSG_COMPAT_LEN(i * sizeof(int));
- if (!err)
- err = put_user(SOL_SOCKET, &cm->cmsg_level);
+ err = put_user(SOL_SOCKET, &cm->cmsg_level);
if (!err)
err = put_user(SCM_RIGHTS, &cm->cmsg_type);
if (!err)
add_timer(&flow_hash_rnd_timer);
}
+static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
+{
+ if (fle->object)
+ atomic_dec(fle->object_ref);
+ kmem_cache_free(flow_cachep, fle);
+ flow_count(cpu)--;
+}
+
static void __flow_cache_shrink(int cpu, int shrink_to)
{
struct flow_cache_entry *fle, **flp;
}
while ((fle = *flp) != NULL) {
*flp = fle->next;
- if (fle->object)
- atomic_dec(fle->object_ref);
- kmem_cache_free(flow_cachep, fle);
- flow_count(cpu)--;
+ flow_entry_kill(cpu, fle);
}
}
}
nocache:
{
+ int err;
void *obj;
atomic_t *obj_ref;
- resolver(key, family, dir, &obj, &obj_ref);
+ err = resolver(key, family, dir, &obj, &obj_ref);
if (fle) {
- fle->genid = atomic_read(&flow_cache_genid);
-
- if (fle->object)
- atomic_dec(fle->object_ref);
-
- fle->object = obj;
- fle->object_ref = obj_ref;
- if (obj)
- atomic_inc(fle->object_ref);
+ if (err) {
+ /* Force security policy check on next lookup */
+ *head = fle->next;
+ flow_entry_kill(cpu, fle);
+ } else {
+ fle->genid = atomic_read(&flow_cache_genid);
+
+ if (fle->object)
+ atomic_dec(fle->object_ref);
+
+ fle->object = obj;
+ fle->object_ref = obj_ref;
+ if (obj)
+ atomic_inc(fle->object_ref);
+ }
}
local_bh_enable();
+ if (err)
+ obj = ERR_PTR(err);
return obj;
}
}
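flow_cache_lookup() now multiplexes the resolver's error into its pointer return value via the kernel's ERR_PTR convention, so callers (see the xfrm changes below) must test with IS_ERR() before dereferencing. A self-contained userspace sketch of the encoding, with the constants inlined for illustration:

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *lookup(int fail)
{
	static int object = 42;

	/* Errors travel in the same channel as the object pointer. */
	return fail ? ERR_PTR(-13 /* -EACCES */) : &object;
}

int main(void)
{
	void *obj = lookup(1);

	if (IS_ERR(obj))
		printf("lookup failed: errno %ld\n", -PTR_ERR(obj));
	else
		printf("object: %d\n", *(int *)obj);
	return 0;
}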
goto errout;
}
- err = rtnl_unicast(skb, NETLINK_CB(skb).pid);
+ err = rtnl_unicast(nskb, NETLINK_CB(skb).pid);
errout:
kfree(iw_buf);
dev_put(dev);
if (i > 0)
{
int cmlen = CMSG_LEN(i*sizeof(int));
- if (!err)
- err = put_user(SOL_SOCKET, &cm->cmsg_level);
+ err = put_user(SOL_SOCKET, &cm->cmsg_level);
if (!err)
err = put_user(SCM_RIGHTS, &cm->cmsg_type);
if (!err)
}
if (sk->sk_state == DCCP_TIME_WAIT) {
- inet_twsk_put((struct inet_timewait_sock *)sk);
+ inet_twsk_put(inet_twsk(sk));
return;
}
bh_lock_sock(nsk);
return nsk;
}
- inet_twsk_put((struct inet_timewait_sock *)nsk);
+ inet_twsk_put(inet_twsk(nsk));
return NULL;
}
goto discard_it;
do_time_wait:
- inet_twsk_put((struct inet_timewait_sock *)sk);
+ inet_twsk_put(inet_twsk(sk));
goto no_dccp_socket;
}
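The conversions above and below replace open-coded casts with the inet_twsk() helper from include/net/inet_timewait_sock.h, which is simply a typed wrapper around the same cast:

static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
{
	return (struct inet_timewait_sock *)sk;
}

Behavior is unchanged; the helper just keeps the type punning in one audited place.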
}
if (sk->sk_state == DCCP_TIME_WAIT) {
- inet_twsk_put((struct inet_timewait_sock *)sk);
+ inet_twsk_put(inet_twsk(sk));
return;
}
bh_lock_sock(nsk);
return nsk;
}
- inet_twsk_put((struct inet_timewait_sock *)nsk);
+ inet_twsk_put(inet_twsk(nsk));
return NULL;
}
goto discard_it;
do_time_wait:
- inet_twsk_put((struct inet_timewait_sock *)sk);
+ inet_twsk_put(inet_twsk(sk));
goto no_dccp_socket;
}
if (peer) {
if ((sock->state != SS_CONNECTED &&
sock->state != SS_CONNECTING) &&
- scp->accept_mode == ACC_IMMED)
+ scp->accept_mode == ACC_IMMED) {
+ release_sock(sk);
return -ENOTCONN;
+ }
memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn));
} else {
static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
- return memcmp(&fl1->nl_u.dn_u, &fl2->nl_u.dn_u, sizeof(fl1->nl_u.dn_u)) == 0 &&
- fl1->oif == fl2->oif &&
- fl1->iif == fl2->iif;
+ return ((fl1->nl_u.dn_u.daddr ^ fl2->nl_u.dn_u.daddr) |
+ (fl1->nl_u.dn_u.saddr ^ fl2->nl_u.dn_u.saddr) |
+#ifdef CONFIG_IP_ROUTE_FWMARK
+ (fl1->nl_u.dn_u.fwmark ^ fl2->nl_u.dn_u.fwmark) |
+#endif
+ (fl1->nl_u.dn_u.scope ^ fl2->nl_u.dn_u.scope) |
+ (fl1->oif ^ fl2->oif) |
+ (fl1->iif ^ fl2->iif)) == 0;
}
static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
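Both this DECnet hunk and the IPv4 one later in the series rely on the same XOR-fold idiom: OR together the XOR of every field pair and compare the result with zero. Unlike the memcmp() it replaces, this compares only the named fields (no dependence on structure padding) and evaluates without early-exit branches. A standalone demonstration with a toy key type:

#include <assert.h>
#include <stdint.h>

struct key {
	uint32_t daddr, saddr;
	int oif, iif;
};

static int keys_equal(const struct key *a, const struct key *b)
{
	/* Each XOR term is zero iff that pair of fields is equal, so the
	 * OR of all terms is zero iff every field matches.
	 */
	return ((a->daddr ^ b->daddr) |
		(a->saddr ^ b->saddr) |
		(uint32_t)(a->oif ^ b->oif) |
		(uint32_t)(a->iif ^ b->iif)) == 0;
}

int main(void)
{
	struct key k1 = { 1, 2, 3, 4 }, k2 = k1;

	assert(keys_equal(&k1, &k2));
	k2.iif = 5;
	assert(!keys_equal(&k1, &k2));
	return 0;
}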
#include <net/tcp.h>
#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
+#include <asm/atomic.h>
#include <asm/bug.h>
struct cipso_v4_domhsh_entry {
unsigned char *key;
size_t key_len;
- struct netlbl_lsm_cache lsm_data;
+ struct netlbl_lsm_cache *lsm_data;
u32 activity;
struct list_head list;
* @entry: the entry to free
*
* Description:
- * This function frees the memory associated with a cache entry.
+ * This function frees the memory associated with a cache entry including the
+ * LSM cache data if there are no longer any users, i.e. reference count == 0.
*
*/
static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry)
{
- if (entry->lsm_data.free)
- entry->lsm_data.free(entry->lsm_data.data);
+ if (entry->lsm_data)
+ netlbl_secattr_cache_free(entry->lsm_data);
kfree(entry->key);
kfree(entry);
}
entry->key_len == key_len &&
memcmp(entry->key, key, key_len) == 0) {
entry->activity += 1;
- secattr->cache.free = entry->lsm_data.free;
- secattr->cache.data = entry->lsm_data.data;
+ atomic_inc(&entry->lsm_data->refcount);
+ secattr->cache = entry->lsm_data;
if (prev_entry == NULL) {
spin_unlock_bh(&cipso_v4_cache[bkt].lock);
return 0;
memcpy(entry->key, cipso_ptr, cipso_ptr_len);
entry->key_len = cipso_ptr_len;
entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len);
- entry->lsm_data.free = secattr->cache.free;
- entry->lsm_data.data = secattr->cache.data;
+ atomic_inc(&secattr->cache->refcount);
+ entry->lsm_data = secattr->cache;
bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1);
spin_lock_bh(&cipso_v4_cache[bkt].lock);
* - When dealing with WCCPv2, skip the extra 4 bytes in the GRE header
*/
if (flags == 0 &&
- skb->protocol == __constant_htons(ETH_P_WCCP)) {
- skb->protocol = __constant_htons(ETH_P_IP);
+ skb->protocol == htons(ETH_P_WCCP)) {
+ skb->protocol = htons(ETH_P_IP);
if ((*(h + offset) & 0xF0) != 0x40)
offset += 4;
}
static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
- return memcmp(&fl1->nl_u.ip4_u, &fl2->nl_u.ip4_u, sizeof(fl1->nl_u.ip4_u)) == 0 &&
- fl1->oif == fl2->oif &&
- fl1->iif == fl2->iif;
+ return ((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
+ (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) |
+#ifdef CONFIG_IP_ROUTE_FWMARK
+ (fl1->nl_u.ip4_u.fwmark ^ fl2->nl_u.ip4_u.fwmark) |
+#endif
+ (*(u16 *)&fl1->nl_u.ip4_u.tos ^
+ *(u16 *)&fl2->nl_u.ip4_u.tos) |
+ (fl1->oif ^ fl2->oif) |
+ (fl1->iif ^ fl2->iif)) == 0;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
return;
}
if (sk->sk_state == TCP_TIME_WAIT) {
- inet_twsk_put((struct inet_timewait_sock *)sk);
+ inet_twsk_put(inet_twsk(sk));
return;
}
struct tcphdr *th = skb->h.th;
struct {
struct tcphdr th;
- u32 tsopt[3];
+ u32 tsopt[TCPOLEN_TSTAMP_ALIGNED >> 2];
} rep;
struct ip_reply_arg arg;
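The new array bound encodes the arithmetic that the old literal 3 hid: the timestamp option is 10 bytes (kind, length, TSval, TSecr), padded by two leading NOPs to a 12-byte aligned block, i.e. three 32-bit words. A compile-time check of that relationship, assuming the usual net/tcp.h values:

#define TCPOLEN_TIMESTAMP	10	/* kind + len + 4-byte TSval + 4-byte TSecr */
#define TCPOLEN_TSTAMP_ALIGNED	12	/* two NOPs pad to a 4-byte boundary */

/* Fails to compile if the derived bound ever diverges from the old "3". */
typedef char tstamp_block_is_three_words
	[(TCPOLEN_TSTAMP_ALIGNED >> 2) == 3 ? 1 : -1];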
bh_lock_sock(nsk);
return nsk;
}
- inet_twsk_put((struct inet_timewait_sock *)nsk);
+ inet_twsk_put(inet_twsk(nsk));
return NULL;
}
do_time_wait:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
- inet_twsk_put((struct inet_timewait_sock *) sk);
+ inet_twsk_put(inet_twsk(sk));
goto discard_it;
}
if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
TCP_INC_STATS_BH(TCP_MIB_INERRS);
- inet_twsk_put((struct inet_timewait_sock *) sk);
+ inet_twsk_put(inet_twsk(sk));
goto discard_it;
}
- switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
- skb, th)) {
+ switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
case TCP_TW_SYN: {
struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
skb->nh.iph->daddr,
th->dest,
inet_iif(skb));
if (sk2) {
- inet_twsk_deschedule((struct inet_timewait_sock *)sk,
- &tcp_death_row);
- inet_twsk_put((struct inet_timewait_sock *)sk);
+ inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
+ inet_twsk_put(inet_twsk(sk));
sk = sk2;
goto process;
}
__u32 tstamp)
{
if (tp->rx_opt.tstamp_ok) {
- *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
- (TCPOPT_NOP << 16) |
- (TCPOPT_TIMESTAMP << 8) |
- TCPOLEN_TIMESTAMP);
+ *ptr++ = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP);
*ptr++ = htonl(tstamp);
*ptr++ = htonl(tp->rx_opt.ts_recent);
}
*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
if (ts) {
if(sack)
- *ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
- (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
+ *ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
+ (TCPOLEN_SACK_PERM << 16) |
+ (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP);
else
- *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
- (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
+ *ptr++ = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP);
*ptr++ = htonl(tstamp); /* TSVAL */
*ptr++ = htonl(ts_recent); /* TSECR */
} else if(sack)
- *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
- (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
+ *ptr++ = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_SACK_PERM << 8) |
+ TCPOLEN_SACK_PERM);
if (offer_wscale)
- *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
+ *ptr++ = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_WINDOW << 16) |
+ (TCPOLEN_WINDOW << 8) |
+ (wscale));
}
/* This routine actually transmits TCP packets queued in by
xdst->u.rt.fl.fl4_dst == fl->fl4_dst &&
xdst->u.rt.fl.fl4_src == fl->fl4_src &&
xdst->u.rt.fl.fl4_tos == fl->fl4_tos &&
- xfrm_bundle_ok(xdst, fl, AF_INET, 0)) {
+ xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) {
dst_clone(dst);
break;
}
---help---
Support for MIPv6 route optimization mode.
+config IPV6_SIT
+ tristate "IPv6: IPv6-in-IPv4 tunnel (SIT driver)"
+ depends on IPV6
+ default y
+ ---help---
+ Tunneling means encapsulating data of one protocol type within
+ another protocol and sending it over a channel that understands the
+ encapsulating protocol. This driver implements encapsulation of IPv6
+ into IPv4 packets. This is useful if you want to connect two IPv6
+ networks over an IPv4-only path.
+
+ Saying M here will produce a module called sit.ko. If unsure, say Y.
+
config IPV6_TUNNEL
tristate "IPv6: IPv6-in-IPv6 tunnel"
select INET6_TUNNEL
obj-$(CONFIG_IPV6) += ipv6.o
-ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o sit.o \
+ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o raw.o \
protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \
obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o
obj-$(CONFIG_NETFILTER) += netfilter/
+obj-$(CONFIG_IPV6_SIT) += sit.o
obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
obj-y += exthdrs_core.o
ndev->regen_timer.data = (unsigned long) ndev;
if ((dev->flags&IFF_LOOPBACK) ||
dev->type == ARPHRD_TUNNEL ||
- dev->type == ARPHRD_NONE ||
- dev->type == ARPHRD_SIT) {
+#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
+ dev->type == ARPHRD_SIT ||
+#endif
+ dev->type == ARPHRD_NONE) {
printk(KERN_INFO
"%s: Disabled Privacy Extensions\n",
dev->name);
This is done here on the assumption that the whole
class of non-broadcast devices need no cloning.
*/
+#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
cfg.fc_flags |= RTF_NONEXTHOP;
+#endif
ip6_route_add(&cfg);
}
ip6_route_add(&cfg);
}
+#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
static void sit_route_add(struct net_device *dev)
{
struct fib6_config cfg = {
/* prefix length - 96 bits "::d.d.d.d" */
ip6_route_add(&cfg);
}
+#endif
static void addrconf_add_lroute(struct net_device *dev)
{
if (dev == NULL)
goto err_exit;
+#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
if (dev->type == ARPHRD_SIT) {
struct ifreq ifr;
mm_segment_t oldfs;
err = dev_open(dev);
}
}
+#endif
err_exit:
rtnl_unlock();
return err;
}
+#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
static void sit_add_v4_addrs(struct inet6_dev *idev)
{
struct inet6_ifaddr * ifp;
}
}
}
+#endif
static void init_loopback(struct net_device *dev)
{
addrconf_add_linklocal(idev, &addr);
}
+#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
static void addrconf_sit_config(struct net_device *dev)
{
struct inet6_dev *idev;
} else
sit_route_add(dev);
}
+#endif
static inline int
ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
}
switch(dev->type) {
+#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
case ARPHRD_SIT:
addrconf_sit_config(dev);
break;
+#endif
case ARPHRD_TUNNEL6:
addrconf_ip6_tnl_config(dev);
break;
err = addrconf_init();
if (err)
goto addrconf_fail;
- sit_init();
/* Init v6 extension headers. */
ipv6_rthdr_init();
mip6_fini();
#endif
/* Cleanup code parts. */
- sit_cleanup();
ip6_flowlabel_cleanup();
addrconf_cleanup();
ip6_route_cleanup();
inet_del_protocol(&sit_protocol, IPPROTO_IPV6);
goto out;
}
+
+module_init(sit_init);
+module_exit(sit_cleanup);
}
if (sk->sk_state == TCP_TIME_WAIT) {
- inet_twsk_put((struct inet_timewait_sock *)sk);
+ inet_twsk_put(inet_twsk(sk));
return;
}
int tot_len = sizeof(struct tcphdr);
if (ts)
- tot_len += 3*4;
+ tot_len += TCPOLEN_TSTAMP_ALIGNED;
buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
GFP_ATOMIC);
bh_lock_sock(nsk);
return nsk;
}
- inet_twsk_put((struct inet_timewait_sock *)nsk);
+ inet_twsk_put(inet_twsk(nsk));
return NULL;
}
do_time_wait:
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
- inet_twsk_put((struct inet_timewait_sock *)sk);
+ inet_twsk_put(inet_twsk(sk));
goto discard_it;
}
if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
TCP_INC_STATS_BH(TCP_MIB_INERRS);
- inet_twsk_put((struct inet_timewait_sock *)sk);
+ inet_twsk_put(inet_twsk(sk));
goto discard_it;
}
- switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
- skb, th)) {
+ switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
case TCP_TW_SYN:
{
struct sock *sk2;
xdst->u.rt6.rt6i_src.plen);
if (ipv6_addr_equal(&xdst->u.rt6.rt6i_dst.addr, &fl_dst_prefix) &&
ipv6_addr_equal(&xdst->u.rt6.rt6i_src.addr, &fl_src_prefix) &&
- xfrm_bundle_ok(xdst, fl, AF_INET6,
+ xfrm_bundle_ok(policy, xdst, fl, AF_INET6,
(xdst->u.rt6.rt6i_dst.plen != 128 ||
xdst->u.rt6.rt6i_src.plen != 128))) {
dst_clone(dst);
if (*dir)
goto out;
}
- else {
- *dir = security_xfrm_sock_policy_alloc(xp, sk);
- if (*dir)
- goto out;
- }
*dir = pol->sadb_x_policy_dir-1;
return xp;
int netlbl_cache_add(const struct sk_buff *skb,
const struct netlbl_lsm_secattr *secattr)
{
- if (secattr->cache.data == NULL)
+ if (secattr->cache == NULL)
return -ENOMSG;
if (CIPSO_V4_OPTEXIST(skb))
for (i = 0; i < 500; i++) {
struct htb_class *cl;
long diff;
- struct rb_node *p = q->wait_pq[level].rb_node;
+ struct rb_node *p = rb_first(&q->wait_pq[level]);
+
if (!p)
return 0;
- while (p->rb_left)
- p = p->rb_left;
cl = rb_entry(p, struct htb_class, pq_node);
if (time_after(cl->pq_key, q->jiffies)) {
assoc, sk, sctp_sk(sk)->type, sk->sk_state,
assoc->state, hash, assoc->assoc_id,
assoc->sndbuf_used,
- (sk->sk_rcvbuf - assoc->rwnd),
+ atomic_read(&assoc->rmem_alloc),
sock_i_uid(sk), sock_i_ino(sk),
epb->bind_addr.port,
assoc->peer.port);
sctp_association_put(asoc);
}
+/* Do accounting for the receive space on the socket.
+ * Accounting for the association is done in ulpevent.c.
+ * We set this as a destructor for the cloned data skbs so that
+ * accounting is done at the correct time.
+ */
+void sctp_sock_rfree(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+ struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+ atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
+}
+
+
/* Helper function to wait for space in the sndbuf. */
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
size_t msg_len)
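sctp_sock_rfree() above is the release half of the pair whose acquire half, sctp_skb_set_owner_r(), was added to include/net/sctp/sctp.h earlier in this series: both key off event->rmem_len rather than skb->truesize, so receive-space accounting stays balanced even when several bundled chunks are clones of one skb. A sketch of the invariant; queue_to_ulp() is a hypothetical caller, not part of the patch:

static void queue_to_ulp(struct sock *sk, struct sk_buff *skb)
{
	/* sk_rmem_alloc += event->rmem_len */
	sctp_skb_set_owner_r(skb, sk);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
}

/* When the skb is later freed, kfree_skb() runs the destructor set above,
 * sctp_sock_rfree(), which subtracts the identical event->rmem_len.
 */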
sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
- sock_rfree(skb);
+ sctp_sock_rfree(skb);
__skb_unlink(skb, &oldsk->sk_receive_queue);
__skb_queue_tail(&newsk->sk_receive_queue, skb);
- skb_set_owner_r(skb, newsk);
+ sctp_skb_set_owner_r(skb, newsk);
}
}
sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
- sock_rfree(skb);
+ sctp_sock_rfree(skb);
__skb_unlink(skb, &oldsp->pd_lobby);
__skb_queue_tail(queue, skb);
- skb_set_owner_r(skb, newsk);
+ sctp_skb_set_owner_r(skb, newsk);
}
}
/* Initialize an ULP event from an given skb. */
-SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags)
+SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event,
+ int msg_flags,
+ unsigned int len)
{
memset(event, 0, sizeof(struct sctp_ulpevent));
event->msg_flags = msg_flags;
+ event->rmem_len = len;
}
/* Create a new sctp_ulpevent. */
goto fail;
event = sctp_skb2event(skb);
- sctp_ulpevent_init(event, msg_flags);
+ sctp_ulpevent_init(event, msg_flags, skb->truesize);
return event;
sctp_association_hold((struct sctp_association *)asoc);
skb = sctp_event2skb(event);
event->asoc = (struct sctp_association *)asoc;
- atomic_add(skb->truesize, &event->asoc->rmem_alloc);
- skb_set_owner_r(skb, asoc->base.sk);
+ atomic_add(event->rmem_len, &event->asoc->rmem_alloc);
+ sctp_skb_set_owner_r(skb, asoc->base.sk);
}
/* A simple destructor to give up the reference to the association. */
static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event)
{
struct sctp_association *asoc = event->asoc;
- struct sk_buff *skb = sctp_event2skb(event);
- atomic_sub(skb->truesize, &asoc->rmem_alloc);
+ atomic_sub(event->rmem_len, &asoc->rmem_alloc);
sctp_association_put(asoc);
}
/* Embed the event fields inside the cloned skb. */
event = sctp_skb2event(skb);
- sctp_ulpevent_init(event, MSG_NOTIFICATION);
+ sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
sre = (struct sctp_remote_error *)
skb_push(skb, sizeof(struct sctp_remote_error));
/* Embed the event fields inside the cloned skb. */
event = sctp_skb2event(skb);
- sctp_ulpevent_init(event, MSG_NOTIFICATION);
+ sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
ssf = (struct sctp_send_failed *)
skb_push(skb, sizeof(struct sctp_send_failed));
/* Embed the event fields inside the cloned skb. */
event = sctp_skb2event(skb);
- /* Initialize event with flags 0. */
- sctp_ulpevent_init(event, 0);
+ /* Initialize event with flags 0 and the correct length.
+ * Since this is a clone of the original skb, only account for
+ * the data of this chunk as other chunks will be accounted separately.
+ */
+ sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff));
sctp_ulpevent_receive_data(event, asoc);
if (!new)
return NULL; /* try again later */
- new->sk = f_frag->sk;
+ sctp_skb_set_owner_r(new, f_frag->sk);
skb_shinfo(new)->frag_list = pos;
} else
}
EXPORT_SYMBOL(xfrm_policy_walk);
-/* Find policy to apply to this flow. */
-
+/*
+ * Find policy to apply to this flow.
+ *
+ * Returns 0 if policy found, else an -errno.
+ */
static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
u8 type, u16 family, int dir)
{
struct xfrm_selector *sel = &pol->selector;
- int match;
+ int match, ret = -ESRCH;
if (pol->family != family ||
pol->type != type)
- return 0;
+ return ret;
match = xfrm_selector_match(sel, fl, family);
- if (match) {
- if (!security_xfrm_policy_lookup(pol, fl->secid, dir))
- return 1;
- }
+ if (match)
+ ret = security_xfrm_policy_lookup(pol, fl->secid, dir);
- return 0;
+ return ret;
}
static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
u16 family, u8 dir)
{
+ int err;
struct xfrm_policy *pol, *ret;
xfrm_address_t *daddr, *saddr;
struct hlist_node *entry;
chain = policy_hash_direct(daddr, saddr, family, dir);
ret = NULL;
hlist_for_each_entry(pol, entry, chain, bydst) {
- if (xfrm_policy_match(pol, fl, type, family, dir)) {
+ err = xfrm_policy_match(pol, fl, type, family, dir);
+ if (err) {
+ if (err == -ESRCH)
+ continue;
+ else {
+ ret = ERR_PTR(err);
+ goto fail;
+ }
+ } else {
ret = pol;
priority = ret->priority;
break;
}
chain = &xfrm_policy_inexact[dir];
hlist_for_each_entry(pol, entry, chain, bydst) {
- if (xfrm_policy_match(pol, fl, type, family, dir) &&
- pol->priority < priority) {
+ err = xfrm_policy_match(pol, fl, type, family, dir);
+ if (err) {
+ if (err == -ESRCH)
+ continue;
+ else {
+ ret = ERR_PTR(err);
+ goto fail;
+ }
+ } else if (pol->priority < priority) {
ret = pol;
break;
}
}
if (ret)
xfrm_pol_hold(ret);
+fail:
read_unlock_bh(&xfrm_policy_lock);
return ret;
}
-static void xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
+static int xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
void **objp, atomic_t **obj_refp)
{
struct xfrm_policy *pol;
+ int err = 0;
#ifdef CONFIG_XFRM_SUB_POLICY
pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir);
- if (pol)
+ if (IS_ERR(pol)) {
+ err = PTR_ERR(pol);
+ pol = NULL;
+ }
+ if (pol || err)
goto end;
#endif
pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir);
-
+ if (IS_ERR(pol)) {
+ err = PTR_ERR(pol);
+ pol = NULL;
+ }
#ifdef CONFIG_XFRM_SUB_POLICY
end:
#endif
if ((*objp = (void *) pol) != NULL)
*obj_refp = &pol->refcnt;
+ return err;
}
static inline int policy_to_flow_dir(int dir)
sk->sk_family);
int err = 0;
- if (match)
- err = security_xfrm_policy_lookup(pol, fl->secid, policy_to_flow_dir(dir));
-
- if (match && !err)
- xfrm_pol_hold(pol);
- else
+ if (match) {
+ err = security_xfrm_policy_lookup(pol, fl->secid,
+ policy_to_flow_dir(dir));
+ if (!err)
+ xfrm_pol_hold(pol);
+ else if (err == -ESRCH)
+ pol = NULL;
+ else
+ pol = ERR_PTR(err);
+ } else
pol = NULL;
}
read_unlock_bh(&xfrm_policy_lock);
pol_dead = 0;
xfrm_nr = 0;
- if (sk && sk->sk_policy[1])
+ if (sk && sk->sk_policy[1]) {
policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
+ if (IS_ERR(policy))
+ return PTR_ERR(policy);
+ }
if (!policy) {
/* To accelerate a bit... */
policy = flow_cache_lookup(fl, dst_orig->ops->family,
dir, xfrm_policy_lookup);
+ if (IS_ERR(policy))
+ return PTR_ERR(policy);
}
if (!policy)
fl, family,
XFRM_POLICY_OUT);
if (pols[1]) {
+ if (IS_ERR(pols[1])) {
+ err = PTR_ERR(pols[1]);
+ goto error;
+ }
if (pols[1]->action == XFRM_POLICY_BLOCK) {
err = -EPERM;
goto error;
}
pol = NULL;
- if (sk && sk->sk_policy[dir])
+ if (sk && sk->sk_policy[dir]) {
pol = xfrm_sk_policy_lookup(sk, dir, &fl);
+ if (IS_ERR(pol))
+ return 0;
+ }
if (!pol)
pol = flow_cache_lookup(&fl, family, fl_dir,
xfrm_policy_lookup);
+ if (IS_ERR(pol))
+ return 0;
+
if (!pol) {
if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
xfrm_secpath_reject(xerr_idx, skb, &fl);
&fl, family,
XFRM_POLICY_IN);
if (pols[1]) {
+ if (IS_ERR(pols[1]))
+ return 0;
pols[1]->curlft.use_time = (unsigned long)xtime.tv_sec;
npols ++;
}
static int stale_bundle(struct dst_entry *dst)
{
- return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
+ return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
}
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
* still valid.
*/
-int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family, int strict)
+int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
+ struct flowi *fl, int family, int strict)
{
struct dst_entry *dst = &first->u.dst;
struct xfrm_dst *last;
if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
return 0;
- if (fl && !security_xfrm_flow_state_match(fl, dst->xfrm))
+ if (fl && !security_xfrm_flow_state_match(fl, dst->xfrm, pol))
return 0;
if (dst->xfrm->km.state != XFRM_STATE_VALID)
return 0;
xp->type = XFRM_POLICY_TYPE_MAIN;
copy_templates(xp, ut, nr);
- if (!xp->security) {
- int err = security_xfrm_sock_policy_alloc(xp, sk);
- if (err) {
- kfree(xp);
- *dir = err;
- return NULL;
- }
- }
-
*dir = p->dir;
return xp;
return 1;
}
-static int dummy_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm)
+static int dummy_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm,
+ struct xfrm_policy *xp)
{
return 1;
}
int selinux_xfrm_policy_lookup(struct xfrm_policy *xp, u32 fl_secid, u8 dir);
int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp, struct flowi *fl);
-int selinux_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm);
+int selinux_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm,
+ struct xfrm_policy *xp);
/*
c = c->next;
ocontext_destroy(ctmp,i);
}
+ p->ocontexts[i] = NULL;
}
g = p->genfs;
g = g->next;
kfree(gtmp);
}
+ p->genfs = NULL;
cond_policydb_destroy(p);
*/
static void selinux_netlbl_cache_free(const void *data)
{
- struct netlbl_cache *cache = NETLBL_CACHE(data);
+ struct netlbl_cache *cache;
+
+ if (data == NULL)
+ return;
+
+ cache = NETLBL_CACHE(data);
switch (cache->type) {
case NETLBL_CACHE_T_MLS:
ebitmap_destroy(&cache->data.mls_label.level[0].cat);
struct netlbl_lsm_secattr secattr;
netlbl_secattr_init(&secattr);
+ secattr.cache = netlbl_secattr_cache_alloc(GFP_ATOMIC);
+ if (secattr.cache == NULL)
+ goto netlbl_cache_add_return;
cache = kzalloc(sizeof(*cache), GFP_ATOMIC);
if (cache == NULL)
- goto netlbl_cache_add_failure;
- secattr.cache.free = selinux_netlbl_cache_free;
- secattr.cache.data = (void *)cache;
+ goto netlbl_cache_add_return;
+ secattr.cache->free = selinux_netlbl_cache_free;
+ secattr.cache->data = (void *)cache;
cache->type = NETLBL_CACHE_T_MLS;
if (ebitmap_cpy(&cache->data.mls_label.level[0].cat,
&ctx->range.level[0].cat) != 0)
- goto netlbl_cache_add_failure;
+ goto netlbl_cache_add_return;
cache->data.mls_label.level[1].cat.highbit =
cache->data.mls_label.level[0].cat.highbit;
cache->data.mls_label.level[1].cat.node =
cache->data.mls_label.level[0].sens = ctx->range.level[0].sens;
cache->data.mls_label.level[1].sens = ctx->range.level[0].sens;
- if (netlbl_cache_add(skb, &secattr) != 0)
- goto netlbl_cache_add_failure;
+ netlbl_cache_add(skb, &secattr);
- return;
-
-netlbl_cache_add_failure:
- netlbl_secattr_destroy(&secattr, 1);
+netlbl_cache_add_return:
+ netlbl_secattr_destroy(&secattr);
}
/**
POLICY_RDLOCK;
- if (secattr->cache.data) {
- cache = NETLBL_CACHE(secattr->cache.data);
+ if (secattr->cache) {
+ cache = NETLBL_CACHE(secattr->cache->data);
switch (cache->type) {
case NETLBL_CACHE_T_SID:
*sid = cache->data.sid;
selinux_netlbl_cache_add(skb, &ctx_new);
ebitmap_destroy(&ctx_new.range.level[0].cat);
} else {
- *sid = SECINITSID_UNLABELED;
+ *sid = SECSID_NULL;
rc = 0;
}
&secattr,
base_sid,
sid);
- netlbl_secattr_destroy(&secattr, 0);
+ netlbl_secattr_destroy(&secattr);
return rc;
}
if (rc == 0)
sksec->nlbl_state = NLBL_LABELED;
- netlbl_secattr_destroy(&secattr, 0);
+ netlbl_secattr_destroy(&secattr);
netlbl_socket_setsid_return:
POLICY_RDUNLOCK;
if (netlbl_sock_getattr(sk, &secattr) == 0 &&
selinux_netlbl_secattr_to_sid(NULL,
&secattr,
- sksec->sid,
+ SECINITSID_UNLABELED,
&nlbl_peer_sid) == 0)
sksec->peer_sid = nlbl_peer_sid;
- netlbl_secattr_destroy(&secattr, 0);
+ netlbl_secattr_destroy(&secattr);
sksec->nlbl_state = NLBL_REQUIRE;
if (rc != 0)
return SECSID_NULL;
- if (peer_sid == SECINITSID_UNLABELED)
- return SECSID_NULL;
-
return peer_sid;
}
u32 netlbl_sid;
u32 recv_perm;
- rc = selinux_netlbl_skbuff_getsid(skb, SECINITSID_NETMSG, &netlbl_sid);
+ rc = selinux_netlbl_skbuff_getsid(skb,
+ SECINITSID_UNLABELED,
+ &netlbl_sid);
if (rc != 0)
return rc;
- if (netlbl_sid == SECINITSID_UNLABELED)
+ if (netlbl_sid == SECSID_NULL)
return 0;
switch (sksec->sclass) {
u32 selinux_netlbl_socket_getpeersec_stream(struct socket *sock)
{
struct sk_security_struct *sksec = sock->sk->sk_security;
-
- if (sksec->peer_sid == SECINITSID_UNLABELED)
- return SECSID_NULL;
-
return sksec->peer_sid;
}
u32 selinux_netlbl_socket_getpeersec_dgram(struct sk_buff *skb)
{
int peer_sid;
- struct sock *sk = skb->sk;
- struct inode_security_struct *isec;
- if (sk == NULL || sk->sk_socket == NULL)
- return SECSID_NULL;
-
- isec = SOCK_INODE(sk->sk_socket)->i_security;
- if (selinux_netlbl_skbuff_getsid(skb, isec->sid, &peer_sid) != 0)
- return SECSID_NULL;
- if (peer_sid == SECINITSID_UNLABELED)
+ if (selinux_netlbl_skbuff_getsid(skb,
+ SECINITSID_UNLABELED,
+ &peer_sid) != 0)
return SECSID_NULL;
return peer_sid;
*/
int selinux_xfrm_policy_lookup(struct xfrm_policy *xp, u32 fl_secid, u8 dir)
{
- int rc = 0;
- u32 sel_sid = SECINITSID_UNLABELED;
+ int rc;
+ u32 sel_sid;
struct xfrm_sec_ctx *ctx;
/* Context sid is either set to label or ANY_ASSOC */
sel_sid = ctx->ctx_sid;
}
+ else
+ /*
+ * All flows should be treated as polmatch'ing an
+ * otherwise applicable "non-labeled" policy. This
+ * would prevent inadvertent "leaks".
+ */
+ return 0;
rc = avc_has_perm(fl_secid, sel_sid, SECCLASS_ASSOCIATION,
ASSOCIATION__POLMATCH,
NULL);
+ if (rc == -EACCES)
+ rc = -ESRCH;
+
return rc;
}
u32 pol_sid;
int err;
- if (x->security)
- state_sid = x->security->ctx_sid;
- else
- state_sid = SECINITSID_UNLABELED;
-
- if (xp->security)
+ if (xp->security) {
+ if (!x->security)
+ /* unlabeled SA and labeled policy can't match */
+ return 0;
+ else
+ state_sid = x->security->ctx_sid;
pol_sid = xp->security->ctx_sid;
- else
- pol_sid = SECINITSID_UNLABELED;
+ } else
+ if (x->security)
+ /* unlabeled policy and labeled SA can't match */
+ return 0;
+ else
+ /* unlabeled policy and unlabeled SA match all flows */
+ return 1;
err = avc_has_perm(state_sid, pol_sid, SECCLASS_ASSOCIATION,
ASSOCIATION__POLMATCH,
if (err)
return 0;
- return selinux_xfrm_flow_state_match(fl, x);
+ err = avc_has_perm(fl->secid, state_sid, SECCLASS_ASSOCIATION,
+ ASSOCIATION__SENDTO,
+ NULL) ? 0 : 1;
+
+ return err;
}
/*
* can use a given security association.
*/
-int selinux_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm)
+int selinux_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm,
+ struct xfrm_policy *xp)
{
int rc = 0;
u32 sel_sid = SECINITSID_UNLABELED;
struct xfrm_sec_ctx *ctx;
+ if (!xp->security)
+ if (!xfrm->security)
+ return 1;
+ else
+ return 0;
+ else
+ if (!xfrm->security)
+ return 0;
+
/* Context sid is either set to label or ANY_ASSOC */
if ((ctx = xfrm->security)) {
if (!selinux_authorizable_ctx(ctx))