path: root/drivers
Commit message (Author, Age)
...
| | * | HWMON: coretemp, suspend fix (Rafael J. Wysocki, 2007-12-04)
* | | | Merge git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6 (Linus Torvalds, 2007-12-06)
|\ \ \ \
| |/ / /
|/| | |
| * | | [PARISC] lba_pci: pci_claim_resources disabled expansion roms (Kyle McMartin, 2007-12-06)
| |/ /
* | | Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6 (Linus Torvalds, 2007-12-05)
|\ \ \
| * | | [LRO]: fix lro_gen_skb() alignment (Andrew Gallatin, 2007-12-05)
* | | | gpio_cs5535: disable AUX on output (Ben Gardner, 2007-12-05)
* | | | Blackfin SPI driver: reconfigure speed_hz and bits_per_word in each spi transfer (Bryan Wu, 2007-12-05)
* | | | Blackfin SPI driver: move hard coded pin_req to board file (Bryan Wu, 2007-12-05)
* | | | Blackfin SPI driver: use void __iomem * for regs_base (Bryan Wu, 2007-12-05)
* | | | Blackfin SPI driver: use cpu_relax() to replace continue in while busywait (Bryan Wu, 2007-12-05)
* | | | spi: spi_bfin: resequence DMA start/stop (Sonic Zhang, 2007-12-05)
* | | | spi: spi_bfin: update handling of delay-after-deselect (Bryan Wu, 2007-12-05)
* | | | spi: spi_bfin: bugfix for 8..16 bit word sizes (Bryan Wu, 2007-12-05)
* | | | spi: spi_bfin: handle multiple spi_masters (Bryan Wu, 2007-12-05)
* | | | spi: spi_bfin: relocate spin/waits (Sonic Zhang, 2007-12-05)
* | | | spi: spi_bfin: change handling of communication parameters (Sonic Zhang, 2007-12-05)
* | | | spi: spi_bfin, rearrange portmux calls (Sonic Zhang, 2007-12-05)
* | | | spi: spi_bfin uses portmux for additional busses (Sonic Zhang, 2007-12-05)
* | | | spi: spi_bfin uses platform device resources (Bryan Wu, 2007-12-05)
* | | | spi: spi_bfin, don't bypass spi framework (Mike Frysinger, 2007-12-05)
* | | | spi: spi_bfin handles spi_transfer.cs_change (Bryan Wu, 2007-12-05)
* | | | spi: spi_bfin cleanups, error handling (Bryan Wu, 2007-12-05)
* | | | spi: bfin spi uses portmux calls (Michael Hennerich, 2007-12-05)
* | | | spi: initial BF54x SPI support (Bryan Wu, 2007-12-05)
* | | | spi: use simplified spi_sync() calling convention (Marc Pignat, 2007-12-05)
* | | | spi: simplify spi_sync() calling convention (Marc Pignat, 2007-12-05)
* | | | spi: at25 driver is for EEPROM not FLASH (David Brownell, 2007-12-05)
* | | | SPI: use mutex not semaphore (David Brownell, 2007-12-05)
* | | | RTC: assure proper memory ordering with respect to RTC_DEV_BUSY flag (Jiri Kosina, 2007-12-05)
|/ / /
* | | Merge branch 'upstream-fixes' of master.kernel.org:/pub/scm/linux/kernel/git/... (Linus Torvalds, 2007-12-04)
|\ \ \
| * | | PHY: Add the phy_device_release device method. (Anton Vorontsov, 2007-12-04)
| * | | gianfar: fix compile warning (Grant Likely, 2007-12-04)
| * | | pasemi_mac: Fix reuse of free'd skb (Olof Johansson, 2007-12-04)
| * | | SMC911X: Fix using of dereferenced skb after netif_rx (Wang Chen, 2007-12-04)
| * | | sky2: recovery deadlock fix (Stephen Hemminger, 2007-12-04)
| * | | Fix memory corruption in fec_mpc52xx (Jon Smirl, 2007-12-04)
| * | | Don't claim to do IPv6 checksum offload (David Woodhouse, 2007-12-04)
| * | | cxgb - revert file mode changes. (Divy Le Ray, 2007-12-04)
* | | | pata_amd/pata_via: de-couple programming of PIO/MWDMA and UDMA timings (Bartlomiej Zolnierkiewicz, 2007-12-04)
* | | | ahci: add the Device IDs of MCP79 AHCI controller to ahci.c (peerchen, 2007-12-04)
* | | | sata_mv: Warn about HPT RocketRAID BIOS treatment of "Legacy" drives (Mark Lord, 2007-12-04)
* | | | sata_nv: don't use legacy DMA in ADMA mode (v3) (Robert Hancock, 2007-12-04)
|/ / /
* | | Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6 (Linus Torvalds, 2007-12-04)
|\ \ \
| * | | [S390] dcssblk: prevent early access without own make_request function (Christian Borntraeger, 2007-12-04)
| * | | [S390] cio: add missing reprobe loop end statement (Peter Oberparleiter, 2007-12-04)
| * | | [S390] cio: Issue SenseID per path. (Cornelia Huck, 2007-12-04)
| |/ /
* / / drivers/s390/net/ctcmain.c: fix build bug (Ingo Molnar, 2007-12-04)
|/ /
* | Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/... (Linus Torvalds, 2007-12-03)
|\ \
| * \ Pull bugzilla-9429 into release branch (Len Brown, 2007-12-02)
| |\ \
| | * | ACPICA: fix acpi-cpufreq boot crash due to _PSD return-by-reference (Bob Moore, 2007-12-02)
ass="hl kwb">static inline int between48(const u64 seq1, const u64 seq2, const u64 seq3) { return (seq3 << 16) - (seq2 << 16) >= (seq1 << 16) - (seq2 << 16); } static inline u64 max48(const u64 seq1, const u64 seq2) { return after48(seq1, seq2) ? seq1 : seq2; } /** * dccp_loss_count - Approximate the number of lost data packets in a burst loss * @s1: last known sequence number before the loss ('hole') * @s2: first sequence number seen after the 'hole' * @ndp: NDP count on packet with sequence number @s2 */ static inline u64 dccp_loss_count(const u64 s1, const u64 s2, const u64 ndp) { s64 delta = dccp_delta_seqno(s1, s2); WARN_ON(delta < 0); delta -= ndp + 1; return delta > 0 ? delta : 0; } /** * dccp_loss_free - Evaluate condition for data loss from RFC 4340, 7.7.1 */ static inline bool dccp_loss_free(const u64 s1, const u64 s2, const u64 ndp) { return dccp_loss_count(s1, s2, ndp) == 0; } enum { DCCP_MIB_NUM = 0, DCCP_MIB_ACTIVEOPENS, /* ActiveOpens */ DCCP_MIB_ESTABRESETS, /* EstabResets */ DCCP_MIB_CURRESTAB, /* CurrEstab */ DCCP_MIB_OUTSEGS, /* OutSegs */ DCCP_MIB_OUTRSTS, DCCP_MIB_ABORTONTIMEOUT, DCCP_MIB_TIMEOUTS, DCCP_MIB_ABORTFAILED, DCCP_MIB_PASSIVEOPENS, DCCP_MIB_ATTEMPTFAILS, DCCP_MIB_OUTDATAGRAMS, DCCP_MIB_INERRS, DCCP_MIB_OPTMANDATORYERROR, DCCP_MIB_INVALIDOPT, __DCCP_MIB_MAX }; #define DCCP_MIB_MAX __DCCP_MIB_MAX struct dccp_mib { unsigned long mibs[DCCP_MIB_MAX]; }; DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics); #define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field) #define __DCCP_INC_STATS(field) __SNMP_INC_STATS(dccp_statistics, field) #define DCCP_DEC_STATS(field) SNMP_DEC_STATS(dccp_statistics, field) /* * Checksumming routines */ static inline unsigned int dccp_csum_coverage(const struct sk_buff *skb) { const struct dccp_hdr* dh = dccp_hdr(skb); if (dh->dccph_cscov == 0) return skb->len; return (dh->dccph_doff + dh->dccph_cscov - 1) * sizeof(u32); } static inline void dccp_csum_outgoing(struct sk_buff *skb) { unsigned int cov = dccp_csum_coverage(skb); if (cov >= skb->len) dccp_hdr(skb)->dccph_cscov = 0; skb->csum = skb_checksum(skb, 0, (cov > skb->len)? 
skb->len : cov, 0); } void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb); int dccp_retransmit_skb(struct sock *sk); void dccp_send_ack(struct sock *sk); void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, struct request_sock *rsk); void dccp_send_sync(struct sock *sk, const u64 seq, const enum dccp_pkt_type pkt_type); /* * TX Packet Dequeueing Interface */ void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb); bool dccp_qpolicy_full(struct sock *sk); void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb); struct sk_buff *dccp_qpolicy_top(struct sock *sk); struct sk_buff *dccp_qpolicy_pop(struct sock *sk); bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param); /* * TX Packet Output and TX Timers */ void dccp_write_xmit(struct sock *sk); void dccp_write_space(struct sock *sk); void dccp_flush_write_queue(struct sock *sk, long *time_budget); void dccp_init_xmit_timers(struct sock *sk); static inline void dccp_clear_xmit_timers(struct sock *sk) { inet_csk_clear_xmit_timers(sk); } unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu); const char *dccp_packet_name(const int type); void dccp_set_state(struct sock *sk, const int state); void dccp_done(struct sock *sk); int dccp_reqsk_init(struct request_sock *rq, struct dccp_sock const *dp, struct sk_buff const *skb); int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb); struct sock *dccp_create_openreq_child(const struct sock *sk, const struct request_sock *req, const struct sk_buff *skb); int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb); struct sock *dccp_v4_request_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req); struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, struct request_sock *req); int dccp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb); int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, struct dccp_hdr *dh, unsigned int len); int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, const struct dccp_hdr *dh, const unsigned int len); int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized); void dccp_destroy_sock(struct sock *sk); void dccp_close(struct sock *sk, long timeout); struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst, struct request_sock *req); int dccp_connect(struct sock *sk); int dccp_disconnect(struct sock *sk, int flags); int dccp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int dccp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); #ifdef CONFIG_COMPAT int compat_dccp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int compat_dccp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); #endif int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg); int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len); void dccp_shutdown(struct sock *sk, int how); int inet_dccp_listen(struct socket *sock, int backlog); __poll_t dccp_poll(struct file *file, struct socket *sock, poll_table *wait); int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); void dccp_req_err(struct sock *sk, u64 seq); struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct 
sk_buff *skb); int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code); void dccp_send_close(struct sock *sk, const int active); int dccp_invalid_packet(struct sk_buff *skb); u32 dccp_sample_rtt(struct sock *sk, long delta); static inline bool dccp_bad_service_code(const struct sock *sk, const __be32 service) { const struct dccp_sock *dp = dccp_sk(sk); if (dp->dccps_service == service) return false; return !dccp_list_has_service(dp->dccps_service_list, service); } /** * dccp_skb_cb - DCCP per-packet control information * @dccpd_type: one of %dccp_pkt_type (or unknown) * @dccpd_ccval: CCVal field (5.1), see e.g. RFC 4342, 8.1 * @dccpd_reset_code: one of %dccp_reset_codes * @dccpd_reset_data: Data1..3 fields (depend on @dccpd_reset_code) * @dccpd_opt_len: total length of all options (5.8) in the packet * @dccpd_seq: sequence number * @dccpd_ack_seq: acknowledgment number subheader field value * * This is used for transmission as well as for reception. */ struct dccp_skb_cb { union { struct inet_skb_parm h4; #if IS_ENABLED(CONFIG_IPV6) struct inet6_skb_parm h6; #endif } header; __u8 dccpd_type:4; __u8 dccpd_ccval:4; __u8 dccpd_reset_code, dccpd_reset_data[3]; __u16 dccpd_opt_len; __u64 dccpd_seq; __u64 dccpd_ack_seq; }; #define DCCP_SKB_CB(__skb) ((struct dccp_skb_cb *)&((__skb)->cb[0])) /* RFC 4340, sec. 7.7 */ static inline int dccp_non_data_packet(const struct sk_buff *skb) { const __u8 type = DCCP_SKB_CB(skb)->dccpd_type; return type == DCCP_PKT_ACK || type == DCCP_PKT_CLOSE || type == DCCP_PKT_CLOSEREQ || type == DCCP_PKT_RESET || type == DCCP_PKT_SYNC || type == DCCP_PKT_SYNCACK; } /* RFC 4340, sec. 7.7 */ static inline int dccp_data_packet(const struct sk_buff *skb) { const __u8 type = DCCP_SKB_CB(skb)->dccpd_type; return type == DCCP_PKT_DATA || type == DCCP_PKT_DATAACK || type == DCCP_PKT_REQUEST || type == DCCP_PKT_RESPONSE; } static inline int dccp_packet_without_ack(const struct sk_buff *skb) { const __u8 type = DCCP_SKB_CB(skb)->dccpd_type; return type == DCCP_PKT_DATA || type == DCCP_PKT_REQUEST; } #define DCCP_PKT_WITHOUT_ACK_SEQ (UINT48_MAX << 2) static inline void dccp_hdr_set_seq(struct dccp_hdr *dh, const u64 gss) { struct dccp_hdr_ext *dhx = (struct dccp_hdr_ext *)((void *)dh + sizeof(*dh)); dh->dccph_seq2 = 0; dh->dccph_seq = htons((gss >> 32) & 0xfffff); dhx->dccph_seq_low = htonl(gss & 0xffffffff); } static inline void dccp_hdr_set_ack(struct dccp_hdr_ack_bits *dhack, const u64 gsr) { dhack->dccph_reserved1 = 0; dhack->dccph_ack_nr_high = htons(gsr >> 32); dhack->dccph_ack_nr_low = htonl(gsr & 0xffffffff); } static inline void dccp_update_gsr(struct sock *sk, u64 seq) { struct dccp_sock *dp = dccp_sk(sk); if (after48(seq, dp->dccps_gsr)) dp->dccps_gsr = seq; /* Sequence validity window depends on remote Sequence Window (7.5.1) */ dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4); /* * Adjust SWL so that it is not below ISR. In contrast to RFC 4340, * 7.5.1 we perform this check beyond the initial handshake: W/W' are * always > 32, so for the first W/W' packets in the lifetime of a * connection we always have to adjust SWL. * A second reason why we are doing this is that the window depends on * the feature-remote value of Sequence Window: nothing stops the peer * from updating this value while we are busy adjusting SWL for the * first W packets (we would have to count from scratch again then). 
* Therefore it is safer to always make sure that the Sequence Window * is not artificially extended by a peer who grows SWL downwards by * continually updating the feature-remote Sequence-Window. * If sequence numbers wrap it is bad luck. But that will take a while * (48 bit), and this measure prevents Sequence-number attacks. */ if (before48(dp->dccps_swl, dp->dccps_isr)) dp->dccps_swl = dp->dccps_isr; dp->dccps_swh = ADD48(dp->dccps_gsr, (3 * dp->dccps_r_seq_win) / 4); } static inline void dccp_update_gss(struct sock *sk, u64 seq) { struct dccp_sock *dp = dccp_sk(sk); dp->dccps_gss = seq; /* Ack validity window depends on local Sequence Window value (7.5.1) */ dp->dccps_awl = SUB48(ADD48(dp->dccps_gss, 1), dp->dccps_l_seq_win); /* Adjust AWL so that it is not below ISS - see comment above for SWL */ if (before48(dp->dccps_awl, dp->dccps_iss)) dp->dccps_awl = dp->dccps_iss; dp->dccps_awh = dp->dccps_gss; } static inline int dccp_ackvec_pending(const struct sock *sk) { return dccp_sk(sk)->dccps_hc_rx_ackvec != NULL && !dccp_ackvec_is_empty(dccp_sk(sk)->dccps_hc_rx_ackvec); } static inline int dccp_ack_pending(const struct sock *sk) { return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk); } int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val); int dccp_feat_finalise_settings(struct dccp_sock *dp); int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq); int dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*, struct sk_buff *skb); int dccp_feat_activate_values(struct sock *sk, struct list_head *fn); void dccp_feat_list_purge(struct list_head *fn_list); int dccp_insert_options(struct sock *sk, struct sk_buff *skb); int dccp_insert_options_rsk(struct dccp_request_sock *, struct sk_buff *); u32 dccp_timestamp(void); void dccp_timestamping_init(void); int dccp_insert_option(struct sk_buff *skb, unsigned char option, const void *value, unsigned char len); #ifdef CONFIG_SYSCTL int dccp_sysctl_init(void); void dccp_sysctl_exit(void); #else static inline int dccp_sysctl_init(void) { return 0; } static inline void dccp_sysctl_exit(void) { } #endif #endif /* _DCCP_H */
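The 48-bit sequence helpers above (between48(), dccp_delta_seqno(), dccp_loss_count()) work by shifting the 48-bit sequence numbers into the upper bits of a u64, so that ordinary unsigned wrap-around performs the circular comparison and an arithmetic right shift restores the sign. A minimal userspace sketch of that arithmetic follows; delta_seqno() and loss_count() are local re-implementations named here purely for illustration (they are not the kernel symbols), and the code assumes two's-complement signed shifts, as the kernel code itself does.

/*
 * Standalone illustration of the 48-bit sequence arithmetic behind
 * dccp_delta_seqno()/dccp_loss_count(). Not part of the header above.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
typedef int64_t  s64;

#define UINT48_MAX ((1ULL << 48) - 1)

/* Signed distance from seq1 to seq2 in the circular 48-bit space:
 * shifting both values into the top 48 bits of a u64 lets unsigned
 * wrap-around do the modular subtraction; the arithmetic right shift
 * then recovers the signed delta. */
static s64 delta_seqno(u64 seq1, u64 seq2)
{
	return ((s64)((seq2 << 16) - (seq1 << 16))) >> 16;
}

/* Burst-loss size between the last packet seen before a hole (s1) and
 * the first packet seen after it (s2), mirroring dccp_loss_count(). */
static u64 loss_count(u64 s1, u64 s2, u64 ndp)
{
	s64 delta = delta_seqno(s1, s2) - (s64)(ndp + 1);

	return delta > 0 ? (u64)delta : 0;
}

int main(void)
{
	/* 100 was received, 103 arrives next, NDP = 0: packets 101 and
	 * 102 are presumed lost. */
	printf("%llu\n", (unsigned long long)loss_count(100, 103, 0));

	/* Wrap-around across the end of the 48-bit space. */
	printf("%llu\n",
	       (unsigned long long)loss_count(UINT48_MAX - 1, 3, 0));
	return 0;
}

With these definitions, loss_count(100, 103, 0) yields 2 (packets 101 and 102 missing), and the wrap-around case loss_count(UINT48_MAX - 1, 3, 0) yields 4, matching the hole-size reasoning documented for dccp_loss_count() above.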